Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/book.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/cli.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/collections.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/collocations.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/compat.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/data.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/decorators.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/downloader.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/featstruct.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/grammar.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/help.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/internals.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/jsontags.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/langnames.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/lazyimport.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/probability.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/text.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/tgrep.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/toolbox.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/treeprettyprinter.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/treetransforms.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/util.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/wsd.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/ccg/__init__.py +34 -0
- env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/api.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/chart.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/combinator.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/lexicon.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/logic.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/ccg/api.py +358 -0
- env-llmeval/lib/python3.10/site-packages/nltk/ccg/chart.py +480 -0
- env-llmeval/lib/python3.10/site-packages/nltk/ccg/combinator.py +339 -0
- env-llmeval/lib/python3.10/site-packages/nltk/ccg/lexicon.py +338 -0
- env-llmeval/lib/python3.10/site-packages/nltk/ccg/logic.py +60 -0
- env-llmeval/lib/python3.10/site-packages/nltk/chunk/api.py +56 -0
- env-llmeval/lib/python3.10/site-packages/nltk/chunk/named_entity.py +352 -0
- env-llmeval/lib/python3.10/site-packages/nltk/chunk/util.py +643 -0
- env-llmeval/lib/python3.10/site-packages/nltk/cluster/__init__.py +92 -0
- env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/api.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/em.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/gaac.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/kmeans.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/util.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/metrics/__init__.py +51 -0
- env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/confusionmatrix.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/distance.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/paice.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/__init__.cpython-310.pyc
ADDED    Binary file (4.84 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/book.cpython-310.pyc
ADDED    Binary file (3 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/cli.cpython-310.pyc
ADDED    Binary file (1.67 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/collections.cpython-310.pyc
ADDED    Binary file (23.3 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/collocations.cpython-310.pyc
ADDED    Binary file (15 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/compat.cpython-310.pyc
ADDED    Binary file (1.14 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/data.cpython-310.pyc
ADDED    Binary file (38.7 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/decorators.cpython-310.pyc
ADDED    Binary file (6.43 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/downloader.cpython-310.pyc
ADDED    Binary file (61.4 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/featstruct.cpython-310.pyc
ADDED    Binary file (73.4 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/grammar.cpython-310.pyc
ADDED    Binary file (53.3 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/help.cpython-310.pyc
ADDED    Binary file (1.61 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/internals.cpython-310.pyc
ADDED    Binary file (28.8 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/jsontags.cpython-310.pyc
ADDED    Binary file (2.33 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/langnames.cpython-310.pyc
ADDED    Binary file (21.2 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/lazyimport.cpython-310.pyc
ADDED    Binary file (3.73 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/probability.cpython-310.pyc
ADDED    Binary file (86.2 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/text.cpython-310.pyc
ADDED    Binary file (28.4 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/tgrep.cpython-310.pyc
ADDED    Binary file (33.8 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/toolbox.cpython-310.pyc
ADDED    Binary file (15.9 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/treeprettyprinter.cpython-310.pyc
ADDED    Binary file (958 Bytes)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/treetransforms.cpython-310.pyc
ADDED    Binary file (5 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/util.cpython-310.pyc
ADDED    Binary file (32.6 kB)
env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/wsd.cpython-310.pyc
ADDED    Binary file (1.84 kB)
env-llmeval/lib/python3.10/site-packages/nltk/ccg/__init__.py
ADDED
@@ -0,0 +1,34 @@
# Natural Language Toolkit: Combinatory Categorial Grammar
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Graeme Gange <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
Combinatory Categorial Grammar.

For more information see nltk/doc/contrib/ccg/ccg.pdf
"""

from nltk.ccg.chart import CCGChart, CCGChartParser, CCGEdge, CCGLeafEdge
from nltk.ccg.combinator import (
    BackwardApplication,
    BackwardBx,
    BackwardCombinator,
    BackwardComposition,
    BackwardSx,
    BackwardT,
    DirectedBinaryCombinator,
    ForwardApplication,
    ForwardCombinator,
    ForwardComposition,
    ForwardSubstitution,
    ForwardT,
    UndirectedBinaryCombinator,
    UndirectedComposition,
    UndirectedFunctionApplication,
    UndirectedSubstitution,
    UndirectedTypeRaise,
)
from nltk.ccg.lexicon import CCGLexicon
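As a quick sanity check (not part of the uploaded diff), the re-exports above make the main CCG classes importable directly from the package namespace; a minimal sketch, assuming NLTK is installed:

from nltk.ccg import CCGChartParser, CCGLexicon

# Both classes resolve to the submodules added in this diff.
print(CCGChartParser.__module__)  # nltk.ccg.chart
print(CCGLexicon.__module__)      # nltk.ccg.lexicon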
env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/__init__.cpython-310.pyc
ADDED    Binary file (967 Bytes)
env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/api.cpython-310.pyc
ADDED    Binary file (11.5 kB)
env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/chart.cpython-310.pyc
ADDED    Binary file (12.7 kB)
env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/combinator.cpython-310.pyc
ADDED    Binary file (9.17 kB)
env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/lexicon.cpython-310.pyc
ADDED    Binary file (7.89 kB)
env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/logic.cpython-310.pyc
ADDED    Binary file (1.52 kB)
env-llmeval/lib/python3.10/site-packages/nltk/ccg/api.py
ADDED
@@ -0,0 +1,358 @@
# Natural Language Toolkit: CCG Categories
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Graeme Gange <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

from abc import ABCMeta, abstractmethod
from functools import total_ordering

from nltk.internals import raise_unorderable_types


@total_ordering
class AbstractCCGCategory(metaclass=ABCMeta):
    """
    Interface for categories in combinatory grammars.
    """

    @abstractmethod
    def is_primitive(self):
        """
        Returns true if the category is primitive.
        """

    @abstractmethod
    def is_function(self):
        """
        Returns true if the category is a function application.
        """

    @abstractmethod
    def is_var(self):
        """
        Returns true if the category is a variable.
        """

    @abstractmethod
    def substitute(self, substitutions):
        """
        Takes a set of (var, category) substitutions, and replaces every
        occurrence of the variable with the corresponding category.
        """

    @abstractmethod
    def can_unify(self, other):
        """
        Determines whether two categories can be unified.
        - Returns None if they cannot be unified
        - Returns a list of necessary substitutions if they can.
        """

    # Utility functions: comparison, strings and hashing.
    @abstractmethod
    def __str__(self):
        pass

    def __eq__(self, other):
        return (
            self.__class__ is other.__class__
            and self._comparison_key == other._comparison_key
        )

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        if not isinstance(other, AbstractCCGCategory):
            raise_unorderable_types("<", self, other)
        if self.__class__ is other.__class__:
            return self._comparison_key < other._comparison_key
        else:
            return self.__class__.__name__ < other.__class__.__name__

    def __hash__(self):
        try:
            return self._hash
        except AttributeError:
            self._hash = hash(self._comparison_key)
            return self._hash


class CCGVar(AbstractCCGCategory):
    """
    Class representing a variable CCG category.
    Used for conjunctions (and possibly type-raising, if implemented as a
    unary rule).
    """

    _maxID = 0

    def __init__(self, prim_only=False):
        """Initialize a variable (selects a new identifier)

        :param prim_only: a boolean that determines whether the variable is
                          restricted to primitives
        :type prim_only: bool
        """
        self._id = self.new_id()
        self._prim_only = prim_only
        self._comparison_key = self._id

    @classmethod
    def new_id(cls):
        """
        A class method allowing generation of unique variable identifiers.
        """
        cls._maxID = cls._maxID + 1
        return cls._maxID - 1

    @classmethod
    def reset_id(cls):
        cls._maxID = 0

    def is_primitive(self):
        return False

    def is_function(self):
        return False

    def is_var(self):
        return True

    def substitute(self, substitutions):
        """If there is a substitution corresponding to this variable,
        return the substituted category.
        """
        for (var, cat) in substitutions:
            if var == self:
                return cat
        return self

    def can_unify(self, other):
        """If the variable can be replaced with other
        a substitution is returned.
        """
        if other.is_primitive() or not self._prim_only:
            return [(self, other)]
        return None

    def id(self):
        return self._id

    def __str__(self):
        return "_var" + str(self._id)


@total_ordering
class Direction:
    """
    Class representing the direction of a function application.
    Also contains maintains information as to which combinators
    may be used with the category.
    """

    def __init__(self, dir, restrictions):
        self._dir = dir
        self._restrs = restrictions
        self._comparison_key = (dir, tuple(restrictions))

    # Testing the application direction
    def is_forward(self):
        return self._dir == "/"

    def is_backward(self):
        return self._dir == "\\"

    def dir(self):
        return self._dir

    def restrs(self):
        """A list of restrictions on the combinators.
        '.' denotes that permuting operations are disallowed
        ',' denotes that function composition is disallowed
        '_' denotes that the direction has variable restrictions.
        (This is redundant in the current implementation of type-raising)
        """
        return self._restrs

    def is_variable(self):
        return self._restrs == "_"

    # Unification and substitution of variable directions.
    # Used only if type-raising is implemented as a unary rule, as it
    # must inherit restrictions from the argument category.
    def can_unify(self, other):
        if other.is_variable():
            return [("_", self.restrs())]
        elif self.is_variable():
            return [("_", other.restrs())]
        else:
            if self.restrs() == other.restrs():
                return []
        return None

    def substitute(self, subs):
        if not self.is_variable():
            return self

        for (var, restrs) in subs:
            if var == "_":
                return Direction(self._dir, restrs)
        return self

    # Testing permitted combinators
    def can_compose(self):
        return "," not in self._restrs

    def can_cross(self):
        return "." not in self._restrs

    def __eq__(self, other):
        return (
            self.__class__ is other.__class__
            and self._comparison_key == other._comparison_key
        )

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        if not isinstance(other, Direction):
            raise_unorderable_types("<", self, other)
        if self.__class__ is other.__class__:
            return self._comparison_key < other._comparison_key
        else:
            return self.__class__.__name__ < other.__class__.__name__

    def __hash__(self):
        try:
            return self._hash
        except AttributeError:
            self._hash = hash(self._comparison_key)
            return self._hash

    def __str__(self):
        r_str = ""
        for r in self._restrs:
            r_str = r_str + "%s" % r
        return f"{self._dir}{r_str}"

    # The negation operator reverses the direction of the application
    def __neg__(self):
        if self._dir == "/":
            return Direction("\\", self._restrs)
        else:
            return Direction("/", self._restrs)


class PrimitiveCategory(AbstractCCGCategory):
    """
    Class representing primitive categories.
    Takes a string representation of the category, and a
    list of strings specifying the morphological subcategories.
    """

    def __init__(self, categ, restrictions=[]):
        self._categ = categ
        self._restrs = restrictions
        self._comparison_key = (categ, tuple(restrictions))

    def is_primitive(self):
        return True

    def is_function(self):
        return False

    def is_var(self):
        return False

    def restrs(self):
        return self._restrs

    def categ(self):
        return self._categ

    # Substitution does nothing to a primitive category
    def substitute(self, subs):
        return self

    # A primitive can be unified with a class of the same
    # base category, given that the other category shares all
    # of its subclasses, or with a variable.
    def can_unify(self, other):
        if not other.is_primitive():
            return None
        if other.is_var():
            return [(other, self)]
        if other.categ() == self.categ():
            for restr in self._restrs:
                if restr not in other.restrs():
                    return None
            return []
        return None

    def __str__(self):
        if self._restrs == []:
            return "%s" % self._categ
        restrictions = "[%s]" % ",".join(repr(r) for r in self._restrs)
        return f"{self._categ}{restrictions}"


class FunctionalCategory(AbstractCCGCategory):
    """
    Class that represents a function application category.
    Consists of argument and result categories, together with
    an application direction.
    """

    def __init__(self, res, arg, dir):
        self._res = res
        self._arg = arg
        self._dir = dir
        self._comparison_key = (arg, dir, res)

    def is_primitive(self):
        return False

    def is_function(self):
        return True

    def is_var(self):
        return False

    # Substitution returns the category consisting of the
    # substitution applied to each of its constituents.
    def substitute(self, subs):
        sub_res = self._res.substitute(subs)
        sub_dir = self._dir.substitute(subs)
        sub_arg = self._arg.substitute(subs)
        return FunctionalCategory(sub_res, sub_arg, self._dir)

    # A function can unify with another function, so long as its
    # constituents can unify, or with an unrestricted variable.
    def can_unify(self, other):
        if other.is_var():
            return [(other, self)]
        if other.is_function():
            sa = self._res.can_unify(other.res())
            sd = self._dir.can_unify(other.dir())
            if sa is not None and sd is not None:
                sb = self._arg.substitute(sa).can_unify(other.arg().substitute(sa))
                if sb is not None:
                    return sa + sb
        return None

    # Constituent accessors
    def arg(self):
        return self._arg

    def res(self):
        return self._res

    def dir(self):
        return self._dir

    def __str__(self):
        return f"({self._res}{self._dir}{self._arg})"
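The category classes in api.py above can be exercised on their own. The following is a minimal sketch (not part of the uploaded diff), assuming NLTK is installed; the category names (S, NP, N) and the variable names are illustrative choices, not values taken from the upload.

from nltk.ccg.api import CCGVar, Direction, FunctionalCategory, PrimitiveCategory

np = PrimitiveCategory("NP")
s = PrimitiveCategory("S")
forward = Direction("/", [])

# FunctionalCategory(res, arg, dir): S/NP looks rightward for an NP to yield an S.
verb_phrase = FunctionalCategory(s, np, forward)
print(verb_phrase)                # (S/NP)
print(verb_phrase.is_function())  # True

# can_unify returns a (possibly empty) substitution list on success, None on failure.
print(verb_phrase.arg().can_unify(PrimitiveCategory("NP")) is not None)  # True
print(verb_phrase.arg().can_unify(PrimitiveCategory("N")) is not None)   # False

# A CCGVar (unless restricted to primitives) unifies with anything and
# records the substitution it would require.
var = CCGVar()
print(var.can_unify(verb_phrase) is not None)  # True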
env-llmeval/lib/python3.10/site-packages/nltk/ccg/chart.py
ADDED
@@ -0,0 +1,480 @@
# Natural Language Toolkit: Combinatory Categorial Grammar
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Graeme Gange <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
The lexicon is constructed by calling
``lexicon.fromstring(<lexicon string>)``.

In order to construct a parser, you also need a rule set.
The standard English rules are provided in chart as
``chart.DefaultRuleSet``.

The parser can then be constructed by calling, for example:
``parser = chart.CCGChartParser(<lexicon>, <ruleset>)``

Parsing is then performed by running
``parser.parse(<sentence>.split())``.

While this returns a list of trees, the default representation
of the produced trees is not very enlightening, particularly
given that it uses the same tree class as the CFG parsers.
It is probably better to call:
``chart.printCCGDerivation(<parse tree extracted from list>)``
which should print a nice representation of the derivation.

This entire process is shown far more clearly in the demonstration:
python chart.py
"""

import itertools

from nltk.ccg.combinator import *
from nltk.ccg.combinator import (
    BackwardApplication,
    BackwardBx,
    BackwardComposition,
    BackwardSx,
    BackwardT,
    ForwardApplication,
    ForwardComposition,
    ForwardSubstitution,
    ForwardT,
)
from nltk.ccg.lexicon import Token, fromstring
from nltk.ccg.logic import *
from nltk.parse import ParserI
from nltk.parse.chart import AbstractChartRule, Chart, EdgeI
from nltk.sem.logic import *
from nltk.tree import Tree


# Based on the EdgeI class from NLTK.
# A number of the properties of the EdgeI interface don't
# transfer well to CCGs, however.
class CCGEdge(EdgeI):
    def __init__(self, span, categ, rule):
        self._span = span
        self._categ = categ
        self._rule = rule
        self._comparison_key = (span, categ, rule)

    # Accessors
    def lhs(self):
        return self._categ

    def span(self):
        return self._span

    def start(self):
        return self._span[0]

    def end(self):
        return self._span[1]

    def length(self):
        return self._span[1] - self.span[0]

    def rhs(self):
        return ()

    def dot(self):
        return 0

    def is_complete(self):
        return True

    def is_incomplete(self):
        return False

    def nextsym(self):
        return None

    def categ(self):
        return self._categ

    def rule(self):
        return self._rule


class CCGLeafEdge(EdgeI):
    """
    Class representing leaf edges in a CCG derivation.
    """

    def __init__(self, pos, token, leaf):
        self._pos = pos
        self._token = token
        self._leaf = leaf
        self._comparison_key = (pos, token.categ(), leaf)

    # Accessors
    def lhs(self):
        return self._token.categ()

    def span(self):
        return (self._pos, self._pos + 1)

    def start(self):
        return self._pos

    def end(self):
        return self._pos + 1

    def length(self):
        return 1

    def rhs(self):
        return self._leaf

    def dot(self):
        return 0

    def is_complete(self):
        return True

    def is_incomplete(self):
        return False

    def nextsym(self):
        return None

    def token(self):
        return self._token

    def categ(self):
        return self._token.categ()

    def leaf(self):
        return self._leaf


class BinaryCombinatorRule(AbstractChartRule):
    """
    Class implementing application of a binary combinator to a chart.
    Takes the directed combinator to apply.
    """

    NUMEDGES = 2

    def __init__(self, combinator):
        self._combinator = combinator

    # Apply a combinator
    def apply(self, chart, grammar, left_edge, right_edge):
        # The left & right edges must be touching.
        if not (left_edge.end() == right_edge.start()):
            return

        # Check if the two edges are permitted to combine.
        # If so, generate the corresponding edge.
        if self._combinator.can_combine(left_edge.categ(), right_edge.categ()):
            for res in self._combinator.combine(left_edge.categ(), right_edge.categ()):
                new_edge = CCGEdge(
                    span=(left_edge.start(), right_edge.end()),
                    categ=res,
                    rule=self._combinator,
                )
                if chart.insert(new_edge, (left_edge, right_edge)):
                    yield new_edge

    # The representation of the combinator (for printing derivations)
    def __str__(self):
        return "%s" % self._combinator


# Type-raising must be handled slightly differently to the other rules, as the
# resulting rules only span a single edge, rather than both edges.


class ForwardTypeRaiseRule(AbstractChartRule):
    """
    Class for applying forward type raising
    """

    NUMEDGES = 2

    def __init__(self):
        self._combinator = ForwardT

    def apply(self, chart, grammar, left_edge, right_edge):
        if not (left_edge.end() == right_edge.start()):
            return

        for res in self._combinator.combine(left_edge.categ(), right_edge.categ()):
            new_edge = CCGEdge(span=left_edge.span(), categ=res, rule=self._combinator)
            if chart.insert(new_edge, (left_edge,)):
                yield new_edge

    def __str__(self):
        return "%s" % self._combinator


class BackwardTypeRaiseRule(AbstractChartRule):
    """
    Class for applying backward type raising.
    """

    NUMEDGES = 2

    def __init__(self):
        self._combinator = BackwardT

    def apply(self, chart, grammar, left_edge, right_edge):
        if not (left_edge.end() == right_edge.start()):
            return

        for res in self._combinator.combine(left_edge.categ(), right_edge.categ()):
            new_edge = CCGEdge(span=right_edge.span(), categ=res, rule=self._combinator)
            if chart.insert(new_edge, (right_edge,)):
                yield new_edge

    def __str__(self):
        return "%s" % self._combinator


# Common sets of combinators used for English derivations.
ApplicationRuleSet = [
    BinaryCombinatorRule(ForwardApplication),
    BinaryCombinatorRule(BackwardApplication),
]
CompositionRuleSet = [
    BinaryCombinatorRule(ForwardComposition),
    BinaryCombinatorRule(BackwardComposition),
    BinaryCombinatorRule(BackwardBx),
]
SubstitutionRuleSet = [
    BinaryCombinatorRule(ForwardSubstitution),
    BinaryCombinatorRule(BackwardSx),
]
TypeRaiseRuleSet = [ForwardTypeRaiseRule(), BackwardTypeRaiseRule()]

# The standard English rule set.
DefaultRuleSet = (
    ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet + TypeRaiseRuleSet
)


class CCGChartParser(ParserI):
    """
    Chart parser for CCGs.
    Based largely on the ChartParser class from NLTK.
    """

    def __init__(self, lexicon, rules, trace=0):
        self._lexicon = lexicon
        self._rules = rules
        self._trace = trace

    def lexicon(self):
        return self._lexicon

    # Implements the CYK algorithm
    def parse(self, tokens):
        tokens = list(tokens)
        chart = CCGChart(list(tokens))
        lex = self._lexicon

        # Initialize leaf edges.
        for index in range(chart.num_leaves()):
            for token in lex.categories(chart.leaf(index)):
                new_edge = CCGLeafEdge(index, token, chart.leaf(index))
                chart.insert(new_edge, ())

        # Select a span for the new edges
        for span in range(2, chart.num_leaves() + 1):
            for start in range(0, chart.num_leaves() - span + 1):
                # Try all possible pairs of edges that could generate
                # an edge for that span
                for part in range(1, span):
                    lstart = start
                    mid = start + part
                    rend = start + span

                    for left in chart.select(span=(lstart, mid)):
                        for right in chart.select(span=(mid, rend)):
                            # Generate all possible combinations of the two edges
                            for rule in self._rules:
                                edges_added_by_rule = 0
                                for newedge in rule.apply(chart, lex, left, right):
                                    edges_added_by_rule += 1

        # Output the resulting parses
        return chart.parses(lex.start())


class CCGChart(Chart):
    def __init__(self, tokens):
        Chart.__init__(self, tokens)

    # Constructs the trees for a given parse. Unfortnunately, the parse trees need to be
    # constructed slightly differently to those in the default Chart class, so it has to
    # be reimplemented
    def _trees(self, edge, complete, memo, tree_class):
        assert complete, "CCGChart cannot build incomplete trees"

        if edge in memo:
            return memo[edge]

        if isinstance(edge, CCGLeafEdge):
            word = tree_class(edge.token(), [self._tokens[edge.start()]])
            leaf = tree_class((edge.token(), "Leaf"), [word])
            memo[edge] = [leaf]
            return [leaf]

        memo[edge] = []
        trees = []

        for cpl in self.child_pointer_lists(edge):
            child_choices = [self._trees(cp, complete, memo, tree_class) for cp in cpl]
            for children in itertools.product(*child_choices):
                lhs = (
                    Token(
                        self._tokens[edge.start() : edge.end()],
                        edge.lhs(),
                        compute_semantics(children, edge),
                    ),
                    str(edge.rule()),
                )
                trees.append(tree_class(lhs, children))

        memo[edge] = trees
        return trees


def compute_semantics(children, edge):
    if children[0].label()[0].semantics() is None:
        return None

    if len(children) == 2:
        if isinstance(edge.rule(), BackwardCombinator):
            children = [children[1], children[0]]

        combinator = edge.rule()._combinator
        function = children[0].label()[0].semantics()
        argument = children[1].label()[0].semantics()

        if isinstance(combinator, UndirectedFunctionApplication):
            return compute_function_semantics(function, argument)
        elif isinstance(combinator, UndirectedComposition):
            return compute_composition_semantics(function, argument)
        elif isinstance(combinator, UndirectedSubstitution):
            return compute_substitution_semantics(function, argument)
        else:
            raise AssertionError("Unsupported combinator '" + combinator + "'")
    else:
        return compute_type_raised_semantics(children[0].label()[0].semantics())


# --------
# Displaying derivations
# --------
def printCCGDerivation(tree):
    # Get the leaves and initial categories
    leafcats = tree.pos()
    leafstr = ""
    catstr = ""

    # Construct a string with both the leaf word and corresponding
    # category aligned.
    for (leaf, cat) in leafcats:
        str_cat = "%s" % cat
        nextlen = 2 + max(len(leaf), len(str_cat))
        lcatlen = (nextlen - len(str_cat)) // 2
        rcatlen = lcatlen + (nextlen - len(str_cat)) % 2
        catstr += " " * lcatlen + str_cat + " " * rcatlen
        lleaflen = (nextlen - len(leaf)) // 2
        rleaflen = lleaflen + (nextlen - len(leaf)) % 2
        leafstr += " " * lleaflen + leaf + " " * rleaflen
    print(leafstr.rstrip())
    print(catstr.rstrip())

    # Display the derivation steps
    printCCGTree(0, tree)


# Prints the sequence of derivation steps.
def printCCGTree(lwidth, tree):
    rwidth = lwidth

    # Is a leaf (word).
    # Increment the span by the space occupied by the leaf.
    if not isinstance(tree, Tree):
        return 2 + lwidth + len(tree)

    # Find the width of the current derivation step
    for child in tree:
        rwidth = max(rwidth, printCCGTree(rwidth, child))

    # Is a leaf node.
    # Don't print anything, but account for the space occupied.
    if not isinstance(tree.label(), tuple):
        return max(
            rwidth, 2 + lwidth + len("%s" % tree.label()), 2 + lwidth + len(tree[0])
        )

    (token, op) = tree.label()

    if op == "Leaf":
        return rwidth

    # Pad to the left with spaces, followed by a sequence of '-'
    # and the derivation rule.
    print(lwidth * " " + (rwidth - lwidth) * "-" + "%s" % op)
    # Print the resulting category on a new line.
    str_res = "%s" % (token.categ())
    if token.semantics() is not None:
        str_res += " {" + str(token.semantics()) + "}"
    respadlen = (rwidth - lwidth - len(str_res)) // 2 + lwidth
    print(respadlen * " " + str_res)
    return rwidth


### Demonstration code

# Construct the lexicon
lex = fromstring(
    """
    :- S, NP, N, VP    # Primitive categories, S is the target primitive

    Det :: NP/N         # Family of words
    Pro :: NP
    TV :: VP/NP
    Modal :: (S\\NP)/VP # Backslashes need to be escaped

    I => Pro             # Word -> Category mapping
    you => Pro

    the => Det

    # Variables have the special keyword 'var'
    # '.' prevents permutation
    # ',' prevents composition
    and => var\\.,var/.,var

    which => (N\\N)/(S/NP)

    will => Modal        # Categories can be either explicit, or families.
    might => Modal

    cook => TV
    eat => TV

    mushrooms => N
    parsnips => N
    bacon => N
    """
)


def demo():
    parser = CCGChartParser(lex, DefaultRuleSet)
    for parse in parser.parse("I might cook and eat the bacon".split()):
        printCCGDerivation(parse)


if __name__ == "__main__":
    demo()
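chart.py's module docstring and demo() sketch the full workflow: build a lexicon with fromstring, wrap it in a CCGChartParser together with DefaultRuleSet, then pretty-print each parse. A minimal usage sketch along those lines (not part of the uploaded diff), assuming NLTK is installed; the toy grammar and sentence below are illustrative:

from nltk.ccg import chart, lexicon

# A small lexicon in the format parsed by lexicon.fromstring: S is the target
# primitive, '::' defines category families, '=>' maps words to categories.
toy_lex = lexicon.fromstring(
    """
    :- S, NP, N, VP
    Det :: NP/N
    Pro :: NP
    TV :: VP/NP
    Modal :: (S\\NP)/VP
    I => Pro
    the => Det
    will => Modal
    eat => TV
    bacon => N
    """
)

parser = chart.CCGChartParser(toy_lex, chart.DefaultRuleSet)
for tree in parser.parse("I will eat the bacon".split()):
    chart.printCCGDerivation(tree)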
env-llmeval/lib/python3.10/site-packages/nltk/ccg/combinator.py
ADDED
@@ -0,0 +1,339 @@
# Natural Language Toolkit: Combinatory Categorial Grammar
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Graeme Gange <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
CCG Combinators
"""

from abc import ABCMeta, abstractmethod

from nltk.ccg.api import FunctionalCategory


class UndirectedBinaryCombinator(metaclass=ABCMeta):
    """
    Abstract class for representing a binary combinator.
    Merely defines functions for checking if the function and argument
    are able to be combined, and what the resulting category is.

    Note that as no assumptions are made as to direction, the unrestricted
    combinators can perform all backward, forward and crossed variations
    of the combinators; these restrictions must be added in the rule
    class.
    """

    @abstractmethod
    def can_combine(self, function, argument):
        pass

    @abstractmethod
    def combine(self, function, argument):
        pass


class DirectedBinaryCombinator(metaclass=ABCMeta):
    """
    Wrapper for the undirected binary combinator.
    It takes left and right categories, and decides which is to be
    the function, and which the argument.
    It then decides whether or not they can be combined.
    """

    @abstractmethod
    def can_combine(self, left, right):
        pass

    @abstractmethod
    def combine(self, left, right):
        pass


class ForwardCombinator(DirectedBinaryCombinator):
    """
    Class representing combinators where the primary functor is on the left.

    Takes an undirected combinator, and a predicate which adds constraints
    restricting the cases in which it may apply.
    """

    def __init__(self, combinator, predicate, suffix=""):
        self._combinator = combinator
        self._predicate = predicate
        self._suffix = suffix

    def can_combine(self, left, right):
        return self._combinator.can_combine(left, right) and self._predicate(
            left, right
        )

    def combine(self, left, right):
        yield from self._combinator.combine(left, right)

    def __str__(self):
        return f">{self._combinator}{self._suffix}"


class BackwardCombinator(DirectedBinaryCombinator):
    """
    The backward equivalent of the ForwardCombinator class.
    """

    def __init__(self, combinator, predicate, suffix=""):
        self._combinator = combinator
        self._predicate = predicate
        self._suffix = suffix

    def can_combine(self, left, right):
        return self._combinator.can_combine(right, left) and self._predicate(
            left, right
        )

    def combine(self, left, right):
        yield from self._combinator.combine(right, left)

    def __str__(self):
        return f"<{self._combinator}{self._suffix}"


class UndirectedFunctionApplication(UndirectedBinaryCombinator):
    """
    Class representing function application.
    Implements rules of the form:
    X/Y Y -> X (>)
    And the corresponding backwards application rule
    """

    def can_combine(self, function, argument):
        if not function.is_function():
            return False

        return not function.arg().can_unify(argument) is None

    def combine(self, function, argument):
        if not function.is_function():
            return

        subs = function.arg().can_unify(argument)
        if subs is None:
            return

        yield function.res().substitute(subs)

    def __str__(self):
        return ""


# Predicates for function application.

# Ensures the left functor takes an argument on the right
def forwardOnly(left, right):
    return left.dir().is_forward()


# Ensures the right functor takes an argument on the left
def backwardOnly(left, right):
    return right.dir().is_backward()


# Application combinator instances
ForwardApplication = ForwardCombinator(UndirectedFunctionApplication(), forwardOnly)
BackwardApplication = BackwardCombinator(UndirectedFunctionApplication(), backwardOnly)


class UndirectedComposition(UndirectedBinaryCombinator):
    """
    Functional composition (harmonic) combinator.
    Implements rules of the form
    X/Y Y/Z -> X/Z (B>)
    And the corresponding backwards and crossed variations.
    """

    def can_combine(self, function, argument):
        # Can only combine two functions, and both functions must
        # allow composition.
        if not (function.is_function() and argument.is_function()):
            return False
        if function.dir().can_compose() and argument.dir().can_compose():
            return not function.arg().can_unify(argument.res()) is None
        return False

    def combine(self, function, argument):
        if not (function.is_function() and argument.is_function()):
            return
        if function.dir().can_compose() and argument.dir().can_compose():
            subs = function.arg().can_unify(argument.res())
            if subs is not None:
                yield FunctionalCategory(
                    function.res().substitute(subs),
                    argument.arg().substitute(subs),
                    argument.dir(),
                )

    def __str__(self):
        return "B"


# Predicates for restricting application of straight composition.
def bothForward(left, right):
    return left.dir().is_forward() and right.dir().is_forward()


def bothBackward(left, right):
    return left.dir().is_backward() and right.dir().is_backward()


# Predicates for crossed composition
def crossedDirs(left, right):
    return left.dir().is_forward() and right.dir().is_backward()


def backwardBxConstraint(left, right):
    # The functors must be crossed inwards
    if not crossedDirs(left, right):
        return False
    # Permuting combinators must be allowed
    if not left.dir().can_cross() and right.dir().can_cross():
        return False
    # The resulting argument category is restricted to be primitive
    return left.arg().is_primitive()


# Straight composition combinators
ForwardComposition = ForwardCombinator(UndirectedComposition(), forwardOnly)
BackwardComposition = BackwardCombinator(UndirectedComposition(), backwardOnly)

# Backward crossed composition
BackwardBx = BackwardCombinator(
    UndirectedComposition(), backwardBxConstraint, suffix="x"
)


class UndirectedSubstitution(UndirectedBinaryCombinator):
    r"""
    Substitution (permutation) combinator.
    Implements rules of the form
    Y/Z (X\Y)/Z -> X/Z (<Sx)
    And other variations.
    """

    def can_combine(self, function, argument):
        if function.is_primitive() or argument.is_primitive():
            return False

        # These could potentially be moved to the predicates, as the
        # constraints may not be general to all languages.
        if function.res().is_primitive():
            return False
        if not function.arg().is_primitive():
            return False

        if not (function.dir().can_compose() and argument.dir().can_compose()):
            return False
        return (function.res().arg() == argument.res()) and (
            function.arg() == argument.arg()
        )

    def combine(self, function, argument):
        if self.can_combine(function, argument):
            yield FunctionalCategory(
                function.res().res(), argument.arg(), argument.dir()
            )

    def __str__(self):
        return "S"


# Predicate for forward substitution
def forwardSConstraint(left, right):
    if not bothForward(left, right):
        return False
    return left.res().dir().is_forward() and left.arg().is_primitive()


# Predicate for backward crossed substitution
def backwardSxConstraint(left, right):
    if not left.dir().can_cross() and right.dir().can_cross():
        return False
    if not bothForward(left, right):
        return False
    return right.res().dir().is_backward() and right.arg().is_primitive()


# Instances of substitution combinators
ForwardSubstitution = ForwardCombinator(UndirectedSubstitution(), forwardSConstraint)
BackwardSx = BackwardCombinator(UndirectedSubstitution(), backwardSxConstraint, "x")


# Retrieves the left-most functional category.
# ie, (N\N)/(S/NP) => N\N
def innermostFunction(categ):
    while categ.res().is_function():
        categ = categ.res()
    return categ


class UndirectedTypeRaise(UndirectedBinaryCombinator):
    """
    Undirected combinator for type raising.
    """

    def can_combine(self, function, arg):
        # The argument must be a function.
        # The restriction that arg.res() must be a function
        # merely reduces redundant type-raising; if arg.res() is
        # primitive, we have:
        # X Y\X =>(<T) Y/(Y\X) Y\X =>(>) Y
        # which is equivalent to
        # X Y\X =>(<) Y
        if not (arg.is_function() and arg.res().is_function()):
            return False

        arg = innermostFunction(arg)

        # left, arg_categ are undefined!
        subs = left.can_unify(arg_categ.arg())
        if subs is not None:
            return True
        return False

    def combine(self, function, arg):
        if not (
            function.is_primitive() and arg.is_function() and arg.res().is_function()
        ):
            return

        # Type-raising matches only the innermost application.
        arg = innermostFunction(arg)

        subs = function.can_unify(arg.arg())
        if subs is not None:
            xcat = arg.res().substitute(subs)
            yield FunctionalCategory(
                xcat, FunctionalCategory(xcat, function, arg.dir()), -(arg.dir())
            )

    def __str__(self):
        return "T"


# Predicates for type-raising
# The direction of the innermost category must be towards
# the primary functor.
# The restriction that the variable must be primitive is not
# common to all versions of CCGs; some authors have other restrictions.
def forwardTConstraint(left, right):
    arg = innermostFunction(right)
    return arg.dir().is_backward() and arg.res().is_primitive()


def backwardTConstraint(left, right):
    arg = innermostFunction(left)
    return arg.dir().is_forward() and arg.res().is_primitive()


# Instances of type-raising combinators
ForwardT = ForwardCombinator(UndirectedTypeRaise(), forwardTConstraint)
BackwardT = BackwardCombinator(UndirectedTypeRaise(), backwardTConstraint)
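The directed combinator instances defined above (ForwardApplication, ForwardComposition, and so on) can also be applied directly to hand-built categories from api.py. A small sketch (not part of the uploaded diff), assuming NLTK is installed; the categories are illustrative:

from nltk.ccg.api import Direction, FunctionalCategory, PrimitiveCategory
from nltk.ccg.combinator import ForwardApplication, ForwardComposition

np = PrimitiveCategory("NP")
n = PrimitiveCategory("N")
s = PrimitiveCategory("S")
fwd = Direction("/", [])

det = FunctionalCategory(np, n, fwd)   # NP/N
verb = FunctionalCategory(s, np, fwd)  # S/NP (a simplified transitive verb)

# Forward application: NP/N  N  ->  NP
if ForwardApplication.can_combine(det, n):
    print([str(c) for c in ForwardApplication.combine(det, n)])     # ['NP']

# Forward (harmonic) composition: S/NP  NP/N  ->  S/N
if ForwardComposition.can_combine(verb, det):
    print([str(c) for c in ForwardComposition.combine(verb, det)])  # ['(S/N)']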
env-llmeval/lib/python3.10/site-packages/nltk/ccg/lexicon.py
ADDED
@@ -0,0 +1,338 @@
# Natural Language Toolkit: Combinatory Categorial Grammar
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Graeme Gange <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
CCG Lexicons
"""

import re
from collections import defaultdict

from nltk.ccg.api import CCGVar, Direction, FunctionalCategory, PrimitiveCategory
from nltk.internals import deprecated
from nltk.sem.logic import Expression

# ------------
# Regular expressions used for parsing components of the lexicon
# ------------

# Parses a primitive category and subscripts
PRIM_RE = re.compile(r"""([A-Za-z]+)(\[[A-Za-z,]+\])?""")

# Separates the next primitive category from the remainder of the
# string
NEXTPRIM_RE = re.compile(r"""([A-Za-z]+(?:\[[A-Za-z,]+\])?)(.*)""")

# Separates the next application operator from the remainder
APP_RE = re.compile(r"""([\\/])([.,]?)([.,]?)(.*)""")

# Parses the definition of the right-hand side (rhs) of either a word or a family
LEX_RE = re.compile(r"""([\S_]+)\s*(::|[-=]+>)\s*(.+)""", re.UNICODE)

# Parses the right hand side that contains category and maybe semantic predicate
RHS_RE = re.compile(r"""([^{}]*[^ {}])\s*(\{[^}]+\})?""", re.UNICODE)

# Parses the semantic predicate
SEMANTICS_RE = re.compile(r"""\{([^}]+)\}""", re.UNICODE)

# Strips comments from a line
COMMENTS_RE = re.compile("""([^#]*)(?:#.*)?""")


class Token:
    """
    Class representing a token.

    token => category {semantics}
    e.g. eat => S\\var[pl]/var {\\x y.eat(x,y)}

    * `token` (string)
    * `categ` (string)
    * `semantics` (Expression)
    """

    def __init__(self, token, categ, semantics=None):
        self._token = token
        self._categ = categ
        self._semantics = semantics

    def categ(self):
        return self._categ

    def semantics(self):
        return self._semantics

    def __str__(self):
        semantics_str = ""
        if self._semantics is not None:
            semantics_str = " {" + str(self._semantics) + "}"
        return "" + str(self._categ) + semantics_str

    def __cmp__(self, other):
        if not isinstance(other, Token):
            return -1
        return cmp((self._categ, self._semantics), other.categ(), other.semantics())


class CCGLexicon:
    """
    Class representing a lexicon for CCG grammars.

    * `primitives`: The list of primitive categories for the lexicon
    * `families`: Families of categories
    * `entries`: A mapping of words to possible categories
    """

    def __init__(self, start, primitives, families, entries):
        self._start = PrimitiveCategory(start)
        self._primitives = primitives
        self._families = families
        self._entries = entries

    def categories(self, word):
        """
        Returns all the possible categories for a word
        """
        return self._entries[word]

    def start(self):
        """
        Return the target category for the parser
        """
        return self._start

    def __str__(self):
        """
        String representation of the lexicon. Used for debugging.
        """
        string = ""
        first = True
        for ident in sorted(self._entries):
            if not first:
                string = string + "\n"
            string = string + ident + " => "

            first = True
            for cat in self._entries[ident]:
                if not first:
                    string = string + " | "
                else:
                    first = False
                string = string + "%s" % cat
        return string


# -----------
# Parsing lexicons
# -----------


def matchBrackets(string):
    """
    Separate the contents matching the first set of brackets from the rest of
    the input.
    """
    rest = string[1:]
    inside = "("

    while rest != "" and not rest.startswith(")"):
        if rest.startswith("("):
            (part, rest) = matchBrackets(rest)
            inside = inside + part
        else:
            inside = inside + rest[0]
            rest = rest[1:]
    if rest.startswith(")"):
        return (inside + ")", rest[1:])
    raise AssertionError("Unmatched bracket in string '" + string + "'")


def nextCategory(string):
    """
    Separate the string for the next portion of the category from the rest
    of the string
    """
    if string.startswith("("):
        return matchBrackets(string)
    return NEXTPRIM_RE.match(string).groups()


def parseApplication(app):
    """
    Parse an application operator
    """
    return Direction(app[0], app[1:])


def parseSubscripts(subscr):
    """
    Parse the subscripts for a primitive category
    """
    if subscr:
        return subscr[1:-1].split(",")
    return []


def parsePrimitiveCategory(chunks, primitives, families, var):
    """
    Parse a primitive category

    If the primitive is the special category 'var', replace it with the
    correct `CCGVar`.
    """
    if chunks[0] == "var":
        if chunks[1] is None:
            if var is None:
                var = CCGVar()
            return (var, var)

    catstr = chunks[0]
    if catstr in families:
        (cat, cvar) = families[catstr]
        if var is None:
            var = cvar
        else:
            cat = cat.substitute([(cvar, var)])
        return (cat, var)

    if catstr in primitives:
        subscrs = parseSubscripts(chunks[1])
        return (PrimitiveCategory(catstr, subscrs), var)
    raise AssertionError(
        "String '" + catstr + "' is neither a family nor primitive category."
    )


def augParseCategory(line, primitives, families, var=None):
    """
    Parse a string representing a category, and returns a tuple with
    (possibly) the CCG variable for the category
    """
    (cat_string, rest) = nextCategory(line)

    if cat_string.startswith("("):
        (res, var) = augParseCategory(cat_string[1:-1], primitives, families, var)

    else:
        (res, var) = parsePrimitiveCategory(
            PRIM_RE.match(cat_string).groups(), primitives, families, var
        )

    while rest != "":
        app = APP_RE.match(rest).groups()
        direction = parseApplication(app[0:3])
        rest = app[3]

        (cat_string, rest) = nextCategory(rest)
        if cat_string.startswith("("):
            (arg, var) = augParseCategory(cat_string[1:-1], primitives, families, var)
        else:
            (arg, var) = parsePrimitiveCategory(
                PRIM_RE.match(cat_string).groups(), primitives, families, var
            )
        res = FunctionalCategory(res, arg, direction)

    return (res, var)


def fromstring(lex_str, include_semantics=False):
    """
    Convert string representation into a lexicon for CCGs.
    """
    CCGVar.reset_id()
    primitives = []
    families = {}
    entries = defaultdict(list)
    for line in lex_str.splitlines():
        # Strip comments and leading/trailing whitespace.
        line = COMMENTS_RE.match(line).groups()[0].strip()
        if line == "":
            continue

        if line.startswith(":-"):
            # A line of primitive categories.
            # The first one is the target category
            # ie, :- S, N, NP, VP
            primitives = primitives + [
                prim.strip() for prim in line[2:].strip().split(",")
            ]
        else:
            # Either a family definition, or a word definition
            (ident, sep, rhs) = LEX_RE.match(line).groups()
            (catstr, semantics_str) = RHS_RE.match(rhs).groups()
            (cat, var) = augParseCategory(catstr, primitives, families)

            if sep == "::":
                # Family definition
                # ie, Det :: NP/N
                families[ident] = (cat, var)
            else:
                semantics = None
                if include_semantics is True:
                    if semantics_str is None:
                        raise AssertionError(
                            line
                            + " must contain semantics because include_semantics is set to True"
                        )
                    else:
                        semantics = Expression.fromstring(
                            SEMANTICS_RE.match(semantics_str).groups()[0]
                        )
                # Word definition
                # ie, which => (N\N)/(S/NP)
                entries[ident].append(Token(ident, cat, semantics))
    return CCGLexicon(primitives[0], primitives, families, entries)


@deprecated("Use fromstring() instead.")
def parseLexicon(lex_str):
    return fromstring(lex_str)


openccg_tinytiny = fromstring(
    """
    # Rather minimal lexicon based on the openccg `tinytiny' grammar.
    # Only incorporates a subset of the morphological subcategories, however.
    :- S,NP,N                    # Primitive categories
    Det :: NP/N                  # Determiners
    Pro :: NP
    IntransVsg :: S\\NP[sg]      # Tensed intransitive verbs (singular)
    IntransVpl :: S\\NP[pl]      # Plural
    TransVsg :: S\\NP[sg]/NP     # Tensed transitive verbs (singular)
    TransVpl :: S\\NP[pl]/NP     # Plural

    the => NP[sg]/N[sg]
    the => NP[pl]/N[pl]

    I => Pro
    me => Pro
    we => Pro
    us => Pro

    book => N[sg]
    books => N[pl]

    peach => N[sg]
    peaches => N[pl]

    policeman => N[sg]
    policemen => N[pl]

    boy => N[sg]
    boys => N[pl]

    sleep => IntransVsg
    sleep => IntransVpl

    eat => IntransVpl
    eat => TransVpl
    eats => IntransVsg
    eats => TransVsg

    see => TransVpl
    sees => TransVsg
    """
)
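Side note (not part of the uploaded file): the lexicon format defined above feeds directly into the CCG chart parser in the same package. The toy lexicon and sentence below are illustrative assumptions, not content from the upload.

# Illustrative sketch only -- not part of the uploaded package files.
from nltk.ccg import chart, lexicon

lex = lexicon.fromstring(
    """
    :- S, NP, N
    Det :: NP/N
    the => Det
    boy => N
    sleeps => S\\NP
    """
)
parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
for parse in parser.parse("the boy sleeps".split()):
    chart.printCCGDerivation(parse)
    break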
env-llmeval/lib/python3.10/site-packages/nltk/ccg/logic.py
ADDED
@@ -0,0 +1,60 @@
# Natural Language Toolkit: Combinatory Categorial Grammar
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Tanin Na Nakorn (@tanin)
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Helper functions for CCG semantics computation
"""

from nltk.sem.logic import *


def compute_type_raised_semantics(semantics):
    core = semantics
    parent = None
    while isinstance(core, LambdaExpression):
        parent = core
        core = core.term

    var = Variable("F")
    while var in core.free():
        var = unique_variable(pattern=var)
    core = ApplicationExpression(FunctionVariableExpression(var), core)

    if parent is not None:
        parent.term = core
    else:
        semantics = core

    return LambdaExpression(var, semantics)


def compute_function_semantics(function, argument):
    return ApplicationExpression(function, argument).simplify()


def compute_composition_semantics(function, argument):
    assert isinstance(argument, LambdaExpression), (
        "`" + str(argument) + "` must be a lambda expression"
    )
    return LambdaExpression(
        argument.variable, ApplicationExpression(function, argument.term).simplify()
    )


def compute_substitution_semantics(function, argument):
    assert isinstance(function, LambdaExpression) and isinstance(
        function.term, LambdaExpression
    ), ("`" + str(function) + "` must be a lambda expression with 2 arguments")
    assert isinstance(argument, LambdaExpression), (
        "`" + str(argument) + "` must be a lambda expression"
    )

    new_argument = ApplicationExpression(
        argument, VariableExpression(function.variable)
    ).simplify()
    new_term = ApplicationExpression(function.term, new_argument).simplify()

    return LambdaExpression(function.variable, new_term)
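Side note (not part of the uploaded file): a small sketch of what the composition helper above computes on two hand-written lambda terms; the predicate names are made up for illustration.

# Illustrative sketch only -- not part of the uploaded package files.
from nltk.ccg.logic import compute_composition_semantics
from nltk.sem.logic import Expression

f = Expression.fromstring(r"\x.sees(mary,x)")
g = Expression.fromstring(r"\y.friend(john,y)")
# Functional composition: \y. f(g(y))
print(compute_composition_semantics(f, g))  # \y.sees(mary,friend(john,y))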
env-llmeval/lib/python3.10/site-packages/nltk/chunk/api.py
ADDED
@@ -0,0 +1,56 @@
# Natural Language Toolkit: Chunk parsing API
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Edward Loper <[email protected]>
#         Steven Bird <[email protected]> (minor additions)
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

##//////////////////////////////////////////////////////
##  Chunk Parser Interface
##//////////////////////////////////////////////////////

from nltk.chunk.util import ChunkScore
from nltk.internals import deprecated
from nltk.parse import ParserI


class ChunkParserI(ParserI):
    """
    A processing interface for identifying non-overlapping groups in
    unrestricted text.  Typically, chunk parsers are used to find base
    syntactic constituents, such as base noun phrases.  Unlike
    ``ParserI``, ``ChunkParserI`` guarantees that the ``parse()`` method
    will always generate a parse.
    """

    def parse(self, tokens):
        """
        Return the best chunk structure for the given tokens
        and return a tree.

        :param tokens: The list of (word, tag) tokens to be chunked.
        :type tokens: list(tuple)
        :rtype: Tree
        """
        raise NotImplementedError()

    @deprecated("Use accuracy(gold) instead.")
    def evaluate(self, gold):
        return self.accuracy(gold)

    def accuracy(self, gold):
        """
        Score the accuracy of the chunker against the gold standard.
        Remove the chunking from the gold standard text, rechunk it using
        the chunker, and return a ``ChunkScore`` object
        reflecting the performance of this chunk parser.

        :type gold: list(Tree)
        :param gold: The list of chunked sentences to score the chunker on.
        :rtype: ChunkScore
        """
        chunkscore = ChunkScore()
        for correct in gold:
            chunkscore.score(correct, self.parse(correct.leaves()))
        return chunkscore
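Side note (not part of the uploaded file): nltk.RegexpParser is one concrete implementation of the ChunkParserI interface above, so the contract can be exercised like this; the grammar and sentence are illustrative.

# Illustrative sketch only -- not part of the uploaded package files.
import nltk

chunker = nltk.RegexpParser("NP: {<DT>?<JJ>*<NN.*>}")  # a ChunkParserI subclass
sent = [("the", "DT"), ("little", "JJ"), ("dog", "NN"), ("barked", "VBD")]
print(chunker.parse(sent))  # Tree('S', [Tree('NP', [...]), ('barked', 'VBD')])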
env-llmeval/lib/python3.10/site-packages/nltk/chunk/named_entity.py
ADDED
@@ -0,0 +1,352 @@
# Natural Language Toolkit: Chunk parsing API
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
Named entity chunker
"""

import os
import pickle
import re
from xml.etree import ElementTree as ET

from nltk.tag import ClassifierBasedTagger, pos_tag

try:
    from nltk.classify import MaxentClassifier
except ImportError:
    pass

from nltk.chunk.api import ChunkParserI
from nltk.chunk.util import ChunkScore
from nltk.data import find
from nltk.tokenize import word_tokenize
from nltk.tree import Tree


class NEChunkParserTagger(ClassifierBasedTagger):
    """
    The IOB tagger used by the chunk parser.
    """

    def __init__(self, train):
        ClassifierBasedTagger.__init__(
            self, train=train, classifier_builder=self._classifier_builder
        )

    def _classifier_builder(self, train):
        return MaxentClassifier.train(
            train, algorithm="megam", gaussian_prior_sigma=1, trace=2
        )

    def _english_wordlist(self):
        try:
            wl = self._en_wordlist
        except AttributeError:
            from nltk.corpus import words

            self._en_wordlist = set(words.words("en-basic"))
            wl = self._en_wordlist
        return wl

    def _feature_detector(self, tokens, index, history):
        word = tokens[index][0]
        pos = simplify_pos(tokens[index][1])
        if index == 0:
            prevword = prevprevword = None
            prevpos = prevprevpos = None
            prevshape = prevtag = prevprevtag = None
        elif index == 1:
            prevword = tokens[index - 1][0].lower()
            prevprevword = None
            prevpos = simplify_pos(tokens[index - 1][1])
            prevprevpos = None
            prevtag = history[index - 1][0]
            prevshape = prevprevtag = None
        else:
            prevword = tokens[index - 1][0].lower()
            prevprevword = tokens[index - 2][0].lower()
            prevpos = simplify_pos(tokens[index - 1][1])
            prevprevpos = simplify_pos(tokens[index - 2][1])
            prevtag = history[index - 1]
            prevprevtag = history[index - 2]
            prevshape = shape(prevword)
        if index == len(tokens) - 1:
            nextword = nextnextword = None
            nextpos = nextnextpos = None
        elif index == len(tokens) - 2:
            nextword = tokens[index + 1][0].lower()
            nextpos = tokens[index + 1][1].lower()
            nextnextword = None
            nextnextpos = None
        else:
            nextword = tokens[index + 1][0].lower()
            nextpos = tokens[index + 1][1].lower()
            nextnextword = tokens[index + 2][0].lower()
            nextnextpos = tokens[index + 2][1].lower()

        # 89.6
        features = {
            "bias": True,
            "shape": shape(word),
            "wordlen": len(word),
            "prefix3": word[:3].lower(),
            "suffix3": word[-3:].lower(),
            "pos": pos,
            "word": word,
            "en-wordlist": (word in self._english_wordlist()),
            "prevtag": prevtag,
            "prevpos": prevpos,
            "nextpos": nextpos,
            "prevword": prevword,
            "nextword": nextword,
            "word+nextpos": f"{word.lower()}+{nextpos}",
            "pos+prevtag": f"{pos}+{prevtag}",
            "shape+prevtag": f"{prevshape}+{prevtag}",
        }

        return features


class NEChunkParser(ChunkParserI):
    """
    Expected input: list of pos-tagged words
    """

    def __init__(self, train):
        self._train(train)

    def parse(self, tokens):
        """
        Each token should be a pos-tagged word
        """
        tagged = self._tagger.tag(tokens)
        tree = self._tagged_to_parse(tagged)
        return tree

    def _train(self, corpus):
        # Convert to tagged sequence
        corpus = [self._parse_to_tagged(s) for s in corpus]

        self._tagger = NEChunkParserTagger(train=corpus)

    def _tagged_to_parse(self, tagged_tokens):
        """
        Convert a list of tagged tokens to a chunk-parse tree.
        """
        sent = Tree("S", [])

        for (tok, tag) in tagged_tokens:
            if tag == "O":
                sent.append(tok)
            elif tag.startswith("B-"):
                sent.append(Tree(tag[2:], [tok]))
            elif tag.startswith("I-"):
                if sent and isinstance(sent[-1], Tree) and sent[-1].label() == tag[2:]:
                    sent[-1].append(tok)
                else:
                    sent.append(Tree(tag[2:], [tok]))
        return sent

    @staticmethod
    def _parse_to_tagged(sent):
        """
        Convert a chunk-parse tree to a list of tagged tokens.
        """
        toks = []
        for child in sent:
            if isinstance(child, Tree):
                if len(child) == 0:
                    print("Warning -- empty chunk in sentence")
                    continue
                toks.append((child[0], f"B-{child.label()}"))
                for tok in child[1:]:
                    toks.append((tok, f"I-{child.label()}"))
            else:
                toks.append((child, "O"))
        return toks


def shape(word):
    if re.match(r"[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$", word, re.UNICODE):
        return "number"
    elif re.match(r"\W+$", word, re.UNICODE):
        return "punct"
    elif re.match(r"\w+$", word, re.UNICODE):
        if word.istitle():
            return "upcase"
        elif word.islower():
            return "downcase"
        else:
            return "mixedcase"
    else:
        return "other"


def simplify_pos(s):
    if s.startswith("V"):
        return "V"
    else:
        return s.split("-")[0]


def postag_tree(tree):
    # Part-of-speech tagging.
    words = tree.leaves()
    tag_iter = (pos for (word, pos) in pos_tag(words))
    newtree = Tree("S", [])
    for child in tree:
        if isinstance(child, Tree):
            newtree.append(Tree(child.label(), []))
            for subchild in child:
                newtree[-1].append((subchild, next(tag_iter)))
        else:
            newtree.append((child, next(tag_iter)))
    return newtree


def load_ace_data(roots, fmt="binary", skip_bnews=True):
    for root in roots:
        for root, dirs, files in os.walk(root):
            if root.endswith("bnews") and skip_bnews:
                continue
            for f in files:
                if f.endswith(".sgm"):
                    yield from load_ace_file(os.path.join(root, f), fmt)


def load_ace_file(textfile, fmt):
    print(f" - {os.path.split(textfile)[1]}")
    annfile = textfile + ".tmx.rdc.xml"

    # Read the xml file, and get a list of entities
    entities = []
    with open(annfile) as infile:
        xml = ET.parse(infile).getroot()
    for entity in xml.findall("document/entity"):
        typ = entity.find("entity_type").text
        for mention in entity.findall("entity_mention"):
            if mention.get("TYPE") != "NAME":
                continue  # only NEs
            s = int(mention.find("head/charseq/start").text)
            e = int(mention.find("head/charseq/end").text) + 1
            entities.append((s, e, typ))

    # Read the text file, and mark the entities.
    with open(textfile) as infile:
        text = infile.read()

    # Strip XML tags, since they don't count towards the indices
    text = re.sub("<(?!/?TEXT)[^>]+>", "", text)

    # Blank out anything before/after <TEXT>
    def subfunc(m):
        return " " * (m.end() - m.start() - 6)

    text = re.sub(r"[\s\S]*<TEXT>", subfunc, text)
    text = re.sub(r"</TEXT>[\s\S]*", "", text)

    # Simplify quotes
    text = re.sub("``", ' "', text)
    text = re.sub("''", '" ', text)

    entity_types = {typ for (s, e, typ) in entities}

    # Binary distinction (NE or not NE)
    if fmt == "binary":
        i = 0
        toks = Tree("S", [])
        for (s, e, typ) in sorted(entities):
            if s < i:
                s = i  # Overlapping!  Deal with this better?
            if e <= s:
                continue
            toks.extend(word_tokenize(text[i:s]))
            toks.append(Tree("NE", text[s:e].split()))
            i = e
        toks.extend(word_tokenize(text[i:]))
        yield toks

    # Multiclass distinction (NE type)
    elif fmt == "multiclass":
        i = 0
        toks = Tree("S", [])
        for (s, e, typ) in sorted(entities):
            if s < i:
                s = i  # Overlapping!  Deal with this better?
            if e <= s:
                continue
            toks.extend(word_tokenize(text[i:s]))
            toks.append(Tree(typ, text[s:e].split()))
            i = e
        toks.extend(word_tokenize(text[i:]))
        yield toks

    else:
        raise ValueError("bad fmt value")


# This probably belongs in a more general-purpose location (as does
# the parse_to_tagged function).
def cmp_chunks(correct, guessed):
    correct = NEChunkParser._parse_to_tagged(correct)
    guessed = NEChunkParser._parse_to_tagged(guessed)
    ellipsis = False
    for (w, ct), (w, gt) in zip(correct, guessed):
        if ct == gt == "O":
            if not ellipsis:
                print(f" {ct:15} {gt:15} {w}")
                print(" {:15} {:15} {2}".format("...", "...", "..."))
                ellipsis = True
        else:
            ellipsis = False
            print(f" {ct:15} {gt:15} {w}")


def build_model(fmt="binary"):
    print("Loading training data...")
    train_paths = [
        find("corpora/ace_data/ace.dev"),
        find("corpora/ace_data/ace.heldout"),
        find("corpora/ace_data/bbn.dev"),
        find("corpora/ace_data/muc.dev"),
    ]
    train_trees = load_ace_data(train_paths, fmt)
    train_data = [postag_tree(t) for t in train_trees]
    print("Training...")
    cp = NEChunkParser(train_data)
    del train_data

    print("Loading eval data...")
    eval_paths = [find("corpora/ace_data/ace.eval")]
    eval_trees = load_ace_data(eval_paths, fmt)
    eval_data = [postag_tree(t) for t in eval_trees]

    print("Evaluating...")
    chunkscore = ChunkScore()
    for i, correct in enumerate(eval_data):
        guess = cp.parse(correct.leaves())
        chunkscore.score(correct, guess)
        if i < 3:
            cmp_chunks(correct, guess)
    print(chunkscore)

    outfilename = f"/tmp/ne_chunker_{fmt}.pickle"
    print(f"Saving chunker to {outfilename}...")

    with open(outfilename, "wb") as outfile:
        pickle.dump(cp, outfile, -1)

    return cp


if __name__ == "__main__":
    # Make sure that the pickled object has the right class name:
    from nltk.chunk.named_entity import build_model

    build_model("binary")
    build_model("multiclass")
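Side note (not part of the uploaded file): the pre-trained named-entity chunker that NLTK ships (built by a training script along the lines of build_model above) is exposed as nltk.ne_chunk. This sketch assumes the relevant NLTK data packages are installed; the sentence is illustrative.

# Illustrative sketch only -- not part of the uploaded package files.
# Requires the 'punkt', 'averaged_perceptron_tagger', 'maxent_ne_chunker'
# and 'words' NLTK data packages.
import nltk

tokens = nltk.pos_tag(nltk.word_tokenize("Pierre Vinken joined the board of Elsevier."))
print(nltk.ne_chunk(tokens, binary=True))  # NE spans appear as Tree('NE', ...) nodes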
env-llmeval/lib/python3.10/site-packages/nltk/chunk/util.py
ADDED
@@ -0,0 +1,643 @@
# Natural Language Toolkit: Chunk format conversions
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Edward Loper <[email protected]>
#         Steven Bird <[email protected]> (minor additions)
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

import re

from nltk.metrics import accuracy as _accuracy
from nltk.tag.mapping import map_tag
from nltk.tag.util import str2tuple
from nltk.tree import Tree

##//////////////////////////////////////////////////////
##  EVALUATION
##//////////////////////////////////////////////////////


def accuracy(chunker, gold):
    """
    Score the accuracy of the chunker against the gold standard.
    Strip the chunk information from the gold standard and rechunk it using
    the chunker, then compute the accuracy score.

    :type chunker: ChunkParserI
    :param chunker: The chunker being evaluated.
    :type gold: tree
    :param gold: The chunk structures to score the chunker on.
    :rtype: float
    """

    gold_tags = []
    test_tags = []
    for gold_tree in gold:
        test_tree = chunker.parse(gold_tree.flatten())
        gold_tags += tree2conlltags(gold_tree)
        test_tags += tree2conlltags(test_tree)

    # print 'GOLD:', gold_tags[:50]
    # print 'TEST:', test_tags[:50]
    return _accuracy(gold_tags, test_tags)


# Patched for increased performance by Yoav Goldberg <[email protected]>, 2006-01-13
#  -- statistics are evaluated only on demand, instead of at every sentence evaluation
#
# SB: use nltk.metrics for precision/recall scoring?
#
class ChunkScore:
    """
    A utility class for scoring chunk parsers.  ``ChunkScore`` can
    evaluate a chunk parser's output, based on a number of statistics
    (precision, recall, f-measure, missed chunks, incorrect chunks).
    It can also combine the scores from the parsing of multiple texts;
    this makes it significantly easier to evaluate a chunk parser that
    operates one sentence at a time.

    Texts are evaluated with the ``score`` method.  The results of
    evaluation can be accessed via a number of accessor methods, such
    as ``precision`` and ``f_measure``.  A typical use of the
    ``ChunkScore`` class is::

        >>> chunkscore = ChunkScore()           # doctest: +SKIP
        >>> for correct in correct_sentences:   # doctest: +SKIP
        ...     guess = chunkparser.parse(correct.leaves())   # doctest: +SKIP
        ...     chunkscore.score(correct, guess)              # doctest: +SKIP
        >>> print('F Measure:', chunkscore.f_measure())       # doctest: +SKIP
        F Measure: 0.823

    :ivar kwargs: Keyword arguments:

        - max_tp_examples: The maximum number actual examples of true
          positives to record.  This affects the ``correct`` member
          function: ``correct`` will not return more than this number
          of true positive examples.  This does *not* affect any of
          the numerical metrics (precision, recall, or f-measure)

        - max_fp_examples: The maximum number actual examples of false
          positives to record.  This affects the ``incorrect`` member
          function and the ``guessed`` member function: ``incorrect``
          will not return more than this number of examples, and
          ``guessed`` will not return more than this number of true
          positive examples.  This does *not* affect any of the
          numerical metrics (precision, recall, or f-measure)

        - max_fn_examples: The maximum number actual examples of false
          negatives to record.  This affects the ``missed`` member
          function and the ``correct`` member function: ``missed``
          will not return more than this number of examples, and
          ``correct`` will not return more than this number of true
          negative examples.  This does *not* affect any of the
          numerical metrics (precision, recall, or f-measure)

        - chunk_label: A regular expression indicating which chunks
          should be compared.  Defaults to ``'.*'`` (i.e., all chunks).

    :type _tp: list(Token)
    :ivar _tp: List of true positives
    :type _fp: list(Token)
    :ivar _fp: List of false positives
    :type _fn: list(Token)
    :ivar _fn: List of false negatives

    :type _tp_num: int
    :ivar _tp_num: Number of true positives
    :type _fp_num: int
    :ivar _fp_num: Number of false positives
    :type _fn_num: int
    :ivar _fn_num: Number of false negatives.
    """

    def __init__(self, **kwargs):
        self._correct = set()
        self._guessed = set()
        self._tp = set()
        self._fp = set()
        self._fn = set()
        self._max_tp = kwargs.get("max_tp_examples", 100)
        self._max_fp = kwargs.get("max_fp_examples", 100)
        self._max_fn = kwargs.get("max_fn_examples", 100)
        self._chunk_label = kwargs.get("chunk_label", ".*")
        self._tp_num = 0
        self._fp_num = 0
        self._fn_num = 0
        self._count = 0
        self._tags_correct = 0.0
        self._tags_total = 0.0

        self._measuresNeedUpdate = False

    def _updateMeasures(self):
        if self._measuresNeedUpdate:
            self._tp = self._guessed & self._correct
            self._fn = self._correct - self._guessed
            self._fp = self._guessed - self._correct
            self._tp_num = len(self._tp)
            self._fp_num = len(self._fp)
            self._fn_num = len(self._fn)
            self._measuresNeedUpdate = False

    def score(self, correct, guessed):
        """
        Given a correctly chunked sentence, score another chunked
        version of the same sentence.

        :type correct: chunk structure
        :param correct: The known-correct ("gold standard") chunked
            sentence.
        :type guessed: chunk structure
        :param guessed: The chunked sentence to be scored.
        """
        self._correct |= _chunksets(correct, self._count, self._chunk_label)
        self._guessed |= _chunksets(guessed, self._count, self._chunk_label)
        self._count += 1
        self._measuresNeedUpdate = True
        # Keep track of per-tag accuracy (if possible)
        try:
            correct_tags = tree2conlltags(correct)
            guessed_tags = tree2conlltags(guessed)
        except ValueError:
            # This exception case is for nested chunk structures,
            # where tree2conlltags will fail with a ValueError: "Tree
            # is too deeply nested to be printed in CoNLL format."
            correct_tags = guessed_tags = ()
        self._tags_total += len(correct_tags)
        self._tags_correct += sum(
            1 for (t, g) in zip(guessed_tags, correct_tags) if t == g
        )

    def accuracy(self):
        """
        Return the overall tag-based accuracy for all text that have
        been scored by this ``ChunkScore``, using the IOB (conll2000)
        tag encoding.

        :rtype: float
        """
        if self._tags_total == 0:
            return 1
        return self._tags_correct / self._tags_total

    def precision(self):
        """
        Return the overall precision for all texts that have been
        scored by this ``ChunkScore``.

        :rtype: float
        """
        self._updateMeasures()
        div = self._tp_num + self._fp_num
        if div == 0:
            return 0
        else:
            return self._tp_num / div

    def recall(self):
        """
        Return the overall recall for all texts that have been
        scored by this ``ChunkScore``.

        :rtype: float
        """
        self._updateMeasures()
        div = self._tp_num + self._fn_num
        if div == 0:
            return 0
        else:
            return self._tp_num / div

    def f_measure(self, alpha=0.5):
        """
        Return the overall F measure for all texts that have been
        scored by this ``ChunkScore``.

        :param alpha: the relative weighting of precision and recall.
            Larger alpha biases the score towards the precision value,
            while smaller alpha biases the score towards the recall
            value.  ``alpha`` should have a value in the range [0,1].
        :type alpha: float
        :rtype: float
        """
        self._updateMeasures()
        p = self.precision()
        r = self.recall()
        if p == 0 or r == 0:  # what if alpha is 0 or 1?
            return 0
        return 1 / (alpha / p + (1 - alpha) / r)

    def missed(self):
        """
        Return the chunks which were included in the
        correct chunk structures, but not in the guessed chunk
        structures, listed in input order.

        :rtype: list of chunks
        """
        self._updateMeasures()
        chunks = list(self._fn)
        return [c[1] for c in chunks]  # discard position information

    def incorrect(self):
        """
        Return the chunks which were included in the guessed chunk structures,
        but not in the correct chunk structures, listed in input order.

        :rtype: list of chunks
        """
        self._updateMeasures()
        chunks = list(self._fp)
        return [c[1] for c in chunks]  # discard position information

    def correct(self):
        """
        Return the chunks which were included in the correct
        chunk structures, listed in input order.

        :rtype: list of chunks
        """
        chunks = list(self._correct)
        return [c[1] for c in chunks]  # discard position information

    def guessed(self):
        """
        Return the chunks which were included in the guessed
        chunk structures, listed in input order.

        :rtype: list of chunks
        """
        chunks = list(self._guessed)
        return [c[1] for c in chunks]  # discard position information

    def __len__(self):
        self._updateMeasures()
        return self._tp_num + self._fn_num

    def __repr__(self):
        """
        Return a concise representation of this ``ChunkScoring``.

        :rtype: str
        """
        return "<ChunkScoring of " + repr(len(self)) + " chunks>"

    def __str__(self):
        """
        Return a verbose representation of this ``ChunkScoring``.
        This representation includes the precision, recall, and
        f-measure scores.  For other information about the score,
        use the accessor methods (e.g., ``missed()`` and ``incorrect()``).

        :rtype: str
        """
        return (
            "ChunkParse score:\n"
            + (f"    IOB Accuracy: {self.accuracy() * 100:5.1f}%%\n")
            + (f"    Precision:    {self.precision() * 100:5.1f}%%\n")
            + (f"    Recall:       {self.recall() * 100:5.1f}%%\n")
            + (f"    F-Measure:    {self.f_measure() * 100:5.1f}%%")
        )


# extract chunks, and assign unique id, the absolute position of
# the first word of the chunk
def _chunksets(t, count, chunk_label):
    pos = 0
    chunks = []
    for child in t:
        if isinstance(child, Tree):
            if re.match(chunk_label, child.label()):
                chunks.append(((count, pos), child.freeze()))
            pos += len(child.leaves())
        else:
            pos += 1
    return set(chunks)


def tagstr2tree(
    s, chunk_label="NP", root_label="S", sep="/", source_tagset=None, target_tagset=None
):
    """
    Divide a string of bracketed tagged text into
    chunks and unchunked tokens, and produce a Tree.
    Chunks are marked by square brackets (``[...]``).  Words are
    delimited by whitespace, and each word should have the form
    ``text/tag``.  Words that do not contain a slash are
    assigned a ``tag`` of None.

    :param s: The string to be converted
    :type s: str
    :param chunk_label: The label to use for chunk nodes
    :type chunk_label: str
    :param root_label: The label to use for the root of the tree
    :type root_label: str
    :rtype: Tree
    """

    WORD_OR_BRACKET = re.compile(r"\[|\]|[^\[\]\s]+")

    stack = [Tree(root_label, [])]
    for match in WORD_OR_BRACKET.finditer(s):
        text = match.group()
        if text[0] == "[":
            if len(stack) != 1:
                raise ValueError(f"Unexpected [ at char {match.start():d}")
            chunk = Tree(chunk_label, [])
            stack[-1].append(chunk)
            stack.append(chunk)
        elif text[0] == "]":
            if len(stack) != 2:
                raise ValueError(f"Unexpected ] at char {match.start():d}")
            stack.pop()
        else:
            if sep is None:
                stack[-1].append(text)
            else:
                word, tag = str2tuple(text, sep)
                if source_tagset and target_tagset:
                    tag = map_tag(source_tagset, target_tagset, tag)
                stack[-1].append((word, tag))

    if len(stack) != 1:
        raise ValueError(f"Expected ] at char {len(s):d}")
    return stack[0]


### CONLL

_LINE_RE = re.compile(r"(\S+)\s+(\S+)\s+([IOB])-?(\S+)?")


def conllstr2tree(s, chunk_types=("NP", "PP", "VP"), root_label="S"):
    """
    Return a chunk structure for a single sentence
    encoded in the given CONLL 2000 style string.
    This function converts a CoNLL IOB string into a tree.
    It uses the specified chunk types
    (defaults to NP, PP and VP), and creates a tree rooted at a node
    labeled S (by default).

    :param s: The CoNLL string to be converted.
    :type s: str
    :param chunk_types: The chunk types to be converted.
    :type chunk_types: tuple
    :param root_label: The node label to use for the root.
    :type root_label: str
    :rtype: Tree
    """

    stack = [Tree(root_label, [])]

    for lineno, line in enumerate(s.split("\n")):
        if not line.strip():
            continue

        # Decode the line.
        match = _LINE_RE.match(line)
        if match is None:
            raise ValueError(f"Error on line {lineno:d}")
        (word, tag, state, chunk_type) = match.groups()

        # If it's a chunk type we don't care about, treat it as O.
        if chunk_types is not None and chunk_type not in chunk_types:
            state = "O"

        # For "Begin"/"Outside", finish any completed chunks -
        # also do so for "Inside" which don't match the previous token.
        mismatch_I = state == "I" and chunk_type != stack[-1].label()
        if state in "BO" or mismatch_I:
            if len(stack) == 2:
                stack.pop()

        # For "Begin", start a new chunk.
        if state == "B" or mismatch_I:
            chunk = Tree(chunk_type, [])
            stack[-1].append(chunk)
            stack.append(chunk)

        # Add the new word token.
        stack[-1].append((word, tag))

    return stack[0]


def tree2conlltags(t):
    """
    Return a list of 3-tuples containing ``(word, tag, IOB-tag)``.
    Convert a tree to the CoNLL IOB tag format.

    :param t: The tree to be converted.
    :type t: Tree
    :rtype: list(tuple)
    """

    tags = []
    for child in t:
        try:
            category = child.label()
            prefix = "B-"
            for contents in child:
                if isinstance(contents, Tree):
                    raise ValueError(
                        "Tree is too deeply nested to be printed in CoNLL format"
                    )
                tags.append((contents[0], contents[1], prefix + category))
                prefix = "I-"
        except AttributeError:
            tags.append((child[0], child[1], "O"))
    return tags


def conlltags2tree(
    sentence, chunk_types=("NP", "PP", "VP"), root_label="S", strict=False
):
    """
    Convert the CoNLL IOB format to a tree.
    """
    tree = Tree(root_label, [])
    for (word, postag, chunktag) in sentence:
        if chunktag is None:
            if strict:
                raise ValueError("Bad conll tag sequence")
            else:
                # Treat as O
                tree.append((word, postag))
        elif chunktag.startswith("B-"):
            tree.append(Tree(chunktag[2:], [(word, postag)]))
        elif chunktag.startswith("I-"):
            if (
                len(tree) == 0
                or not isinstance(tree[-1], Tree)
                or tree[-1].label() != chunktag[2:]
            ):
                if strict:
                    raise ValueError("Bad conll tag sequence")
                else:
                    # Treat as B-*
                    tree.append(Tree(chunktag[2:], [(word, postag)]))
            else:
                tree[-1].append((word, postag))
        elif chunktag == "O":
            tree.append((word, postag))
        else:
            raise ValueError(f"Bad conll tag {chunktag!r}")
    return tree


def tree2conllstr(t):
    """
    Return a multiline string where each line contains a word, tag and IOB tag.
    Convert a tree to the CoNLL IOB string format

    :param t: The tree to be converted.
    :type t: Tree
    :rtype: str
    """
    lines = [" ".join(token) for token in tree2conlltags(t)]
    return "\n".join(lines)


### IEER

_IEER_DOC_RE = re.compile(
    r"<DOC>\s*"
    r"(<DOCNO>\s*(?P<docno>.+?)\s*</DOCNO>\s*)?"
    r"(<DOCTYPE>\s*(?P<doctype>.+?)\s*</DOCTYPE>\s*)?"
    r"(<DATE_TIME>\s*(?P<date_time>.+?)\s*</DATE_TIME>\s*)?"
    r"<BODY>\s*"
    r"(<HEADLINE>\s*(?P<headline>.+?)\s*</HEADLINE>\s*)?"
    r"<TEXT>(?P<text>.*?)</TEXT>\s*"
    r"</BODY>\s*</DOC>\s*",
    re.DOTALL,
)

_IEER_TYPE_RE = re.compile(r'<b_\w+\s+[^>]*?type="(?P<type>\w+)"')


def _ieer_read_text(s, root_label):
    stack = [Tree(root_label, [])]
    # s will be None if there is no headline in the text
    # return the empty list in place of a Tree
    if s is None:
        return []
    for piece_m in re.finditer(r"<[^>]+>|[^\s<]+", s):
        piece = piece_m.group()
        try:
            if piece.startswith("<b_"):
                m = _IEER_TYPE_RE.match(piece)
                if m is None:
                    print("XXXX", piece)
                chunk = Tree(m.group("type"), [])
                stack[-1].append(chunk)
                stack.append(chunk)
            elif piece.startswith("<e_"):
                stack.pop()
            # elif piece.startswith('<'):
            #     print "ERROR:", piece
            #     raise ValueError # Unexpected HTML
            else:
                stack[-1].append(piece)
        except (IndexError, ValueError) as e:
            raise ValueError(
                f"Bad IEER string (error at character {piece_m.start():d})"
            ) from e
    if len(stack) != 1:
        raise ValueError("Bad IEER string")
    return stack[0]


def ieerstr2tree(
    s,
    chunk_types=[
        "LOCATION",
        "ORGANIZATION",
        "PERSON",
        "DURATION",
        "DATE",
        "CARDINAL",
        "PERCENT",
        "MONEY",
        "MEASURE",
    ],
    root_label="S",
):
    """
    Return a chunk structure containing the chunked tagged text that is
    encoded in the given IEER style string.
    Convert a string of chunked tagged text in the IEER named
    entity format into a chunk structure.  Chunks are of several
    types, LOCATION, ORGANIZATION, PERSON, DURATION, DATE, CARDINAL,
    PERCENT, MONEY, and MEASURE.

    :rtype: Tree
    """

    # Try looking for a single document.  If that doesn't work, then just
    # treat everything as if it was within the <TEXT>...</TEXT>.
    m = _IEER_DOC_RE.match(s)
    if m:
        return {
            "text": _ieer_read_text(m.group("text"), root_label),
            "docno": m.group("docno"),
            "doctype": m.group("doctype"),
            "date_time": m.group("date_time"),
            #'headline': m.group('headline')
            # we want to capture NEs in the headline too!
            "headline": _ieer_read_text(m.group("headline"), root_label),
        }
    else:
        return _ieer_read_text(s, root_label)


def demo():

    s = "[ Pierre/NNP Vinken/NNP ] ,/, [ 61/CD years/NNS ] old/JJ ,/, will/MD join/VB [ the/DT board/NN ] ./."
    import nltk

    t = nltk.chunk.tagstr2tree(s, chunk_label="NP")
    t.pprint()
    print()

    s = """
These DT B-NP
research NN I-NP
protocols NNS I-NP
offer VBP B-VP
to TO B-PP
the DT B-NP
patient NN I-NP
not RB O
only RB O
the DT B-NP
very RB I-NP
best JJS I-NP
therapy NN I-NP
which WDT B-NP
we PRP B-NP
have VBP B-VP
established VBN I-VP
today NN B-NP
but CC B-NP
also RB I-NP
the DT B-NP
hope NN I-NP
of IN B-PP
something NN B-NP
still RB B-ADJP
better JJR I-ADJP
. . O
"""

    conll_tree = conllstr2tree(s, chunk_types=("NP", "PP"))
    conll_tree.pprint()

    # Demonstrate CoNLL output
    print("CoNLL output:")
    print(nltk.chunk.tree2conllstr(conll_tree))
    print()


if __name__ == "__main__":
    demo()
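Side note (not part of the uploaded file): a short round trip through the IOB conversion helpers defined above, with a made-up tagged sentence.

# Illustrative sketch only -- not part of the uploaded package files.
from nltk.chunk.util import conlltags2tree, tree2conlltags

iob = [("the", "DT", "B-NP"), ("boy", "NN", "I-NP"), ("ran", "VBD", "O")]
tree = conlltags2tree(iob)
print(tree)                          # (S (NP the/DT boy/NN) ran/VBD)
print(tree2conlltags(tree) == iob)   # True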
env-llmeval/lib/python3.10/site-packages/nltk/cluster/__init__.py
ADDED
@@ -0,0 +1,92 @@
+# Natural Language Toolkit: Clusterers
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Trevor Cohn <[email protected]>
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+"""
+This module contains a number of basic clustering algorithms. Clustering
+describes the task of discovering groups of similar items within a large
+collection. It is also described as unsupervised machine learning, as the data
+from which it learns is unannotated with class information, as is the case for
+supervised learning. Annotated data is difficult and expensive to obtain in
+the quantities required for the majority of supervised learning algorithms.
+This problem, the knowledge acquisition bottleneck, is common to most natural
+language processing tasks, thus fueling the need for quality unsupervised
+approaches.
+
+This module contains a k-means clusterer, E-M clusterer and a group average
+agglomerative clusterer (GAAC). All these clusterers involve finding good
+cluster groupings for a set of vectors in multi-dimensional space.
+
+The K-means clusterer starts with k arbitrarily chosen means then allocates each
+vector to the cluster with the closest mean. It then recalculates the means of
+each cluster as the centroid of the vectors in the cluster. This process
+repeats until the cluster memberships stabilise. This is a hill-climbing
+algorithm which may converge to a local maximum. Hence the clustering is
+often repeated with random initial means and the most commonly occurring
+output means are chosen.
+
+The GAAC clusterer starts with each of the *N* vectors as singleton clusters.
+It then iteratively merges pairs of clusters which have the closest centroids.
+This continues until there is only one cluster. The order of merges gives rise
+to a dendrogram - a tree with the earlier merges lower than later merges. The
+membership of a given number of clusters *c*, *1 <= c <= N*, can be found by
+cutting the dendrogram at depth *c*.
+
+The Gaussian EM clusterer models the vectors as being produced by a mixture
+of k Gaussian sources. The parameters of these sources (prior probability,
+mean and covariance matrix) are then found to maximise the likelihood of the
+given data. This is done with the expectation maximisation algorithm. It
+starts with k arbitrarily chosen means, priors and covariance matrices. It
+then calculates the membership probabilities for each vector in each of the
+clusters - this is the 'E' step. The cluster parameters are then updated in
+the 'M' step using the maximum likelihood estimate from the cluster membership
+probabilities. This process continues until the likelihood of the data does
+not significantly increase.
+
+They all extend the ClusterI interface which defines common operations
+available with each clusterer. These operations include:
+
+- cluster: clusters a sequence of vectors
+- classify: assign a vector to a cluster
+- classification_probdist: give the probability distribution over cluster memberships
+
+The current existing clusterers also extend cluster.VectorSpace, an
+abstract class which allows for singular value decomposition (SVD) and vector
+normalisation. SVD is used to reduce the dimensionality of the vector space in
+such a manner as to preserve as much of the variation as possible, by
+reparameterising the axes in order of variability and discarding all bar the
+first d dimensions. Normalisation ensures that vectors fall in the unit
+hypersphere.
+
+Usage example (see also demo())::
+
+    from nltk import cluster
+    from nltk.cluster import euclidean_distance
+    from numpy import array
+
+    vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0]]]
+
+    # initialise the clusterer (will also assign the vectors to clusters)
+    clusterer = cluster.KMeansClusterer(2, euclidean_distance)
+    clusterer.cluster(vectors, True)
+
+    # classify a new vector
+    print(clusterer.classify(array([3, 3])))
+
+Note that the vectors must use numpy array-like
+objects. nltk_contrib.unimelb.tacohn.SparseArrays may be used for
+efficiency when required.
+"""
+
+from nltk.cluster.em import EMClusterer
+from nltk.cluster.gaac import GAAClusterer
+from nltk.cluster.kmeans import KMeansClusterer
+from nltk.cluster.util import (
+    Dendrogram,
+    VectorSpaceClusterer,
+    cosine_distance,
+    euclidean_distance,
+)
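
The docstring in this file only walks through the K-means clusterer, so a short sketch of the other two clusterers it describes (GAAC and Gaussian EM) may help. This example is not part of the diff: the toy vectors, repeats=10, the EM seed means and bias=0.1 are assumed, illustrative values rather than documented defaults::

    import numpy
    from nltk.cluster import EMClusterer, GAAClusterer, KMeansClusterer, euclidean_distance

    vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]

    # K-means: repeats=10 reruns the hill-climbing search from fresh random means
    # and keeps the most commonly occurring set of output means.
    km = KMeansClusterer(2, euclidean_distance, repeats=10)
    print(km.cluster(vectors, assign_clusters=True))

    # GAAC: merge singleton clusters until one remains, then read the two-cluster
    # memberships off the resulting dendrogram.
    gaac = GAAClusterer(2)
    print(gaac.cluster(vectors, assign_clusters=True))
    gaac.dendrogram().show()  # textual rendering of the merge tree

    # Gaussian EM: seeded with two arbitrary means; the E/M iterations refine the
    # means, priors and covariances until the data likelihood stops improving.
    em = EMClusterer([numpy.array([4.0, 2.0]), numpy.array([2.0, 2.0])], bias=0.1)
    print(em.cluster(vectors, assign_clusters=True))
    print(em.classify(numpy.array([3, 3])))

With assign_clusters=True, cluster() both fits the model and returns one cluster index per input vector, mirroring the K-means usage shown in the docstring above.
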
env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (4.31 kB)

env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/api.cpython-310.pyc
ADDED
Binary file (2.42 kB)

env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/em.cpython-310.pyc
ADDED
Binary file (6.84 kB)

env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/gaac.cpython-310.pyc
ADDED
Binary file (4.98 kB)

env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/kmeans.cpython-310.pyc
ADDED
Binary file (6.66 kB)

env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/util.cpython-310.pyc
ADDED
Binary file (9.84 kB)

env-llmeval/lib/python3.10/site-packages/nltk/metrics/__init__.py
ADDED
@@ -0,0 +1,51 @@
+# Natural Language Toolkit: Metrics
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Steven Bird <[email protected]>
+#         Edward Loper <[email protected]>
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+#
+
+"""
+NLTK Metrics
+
+Classes and methods for scoring processing modules.
+"""
+
+from nltk.metrics.agreement import AnnotationTask
+from nltk.metrics.aline import align
+from nltk.metrics.association import (
+    BigramAssocMeasures,
+    ContingencyMeasures,
+    NgramAssocMeasures,
+    QuadgramAssocMeasures,
+    TrigramAssocMeasures,
+)
+from nltk.metrics.confusionmatrix import ConfusionMatrix
+from nltk.metrics.distance import (
+    binary_distance,
+    custom_distance,
+    edit_distance,
+    edit_distance_align,
+    fractional_presence,
+    interval_distance,
+    jaccard_distance,
+    masi_distance,
+    presence,
+)
+from nltk.metrics.paice import Paice
+from nltk.metrics.scores import (
+    accuracy,
+    approxrand,
+    f_measure,
+    log_likelihood,
+    precision,
+    recall,
+)
+from nltk.metrics.segmentation import ghd, pk, windowdiff
+from nltk.metrics.spearman import (
+    ranks_from_scores,
+    ranks_from_sequence,
+    spearman_correlation,
+)
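
Since this __init__ module only re-exports names, a small, self-contained sketch of a few of the imported metrics may be useful. The toy tag sequences and segmentation strings below are made-up illustrations, not data from the diff::

    from nltk.metrics import (
        ConfusionMatrix,
        accuracy,
        edit_distance,
        f_measure,
        jaccard_distance,
        precision,
        recall,
        windowdiff,
    )

    # token-level comparison of a reference tagging against a predicted one
    reference = "DET NN VB DET JJ NN NN IN DET NN".split()
    test = "DET VB VB DET NN NN NN IN DET NN".split()
    print(accuracy(reference, test))
    print(ConfusionMatrix(reference, test).pretty_format(sort_by_count=True))

    # the set-based scores expect sets of retrieved/relevant items
    ref_set, test_set = set(reference), set(test)
    print(precision(ref_set, test_set), recall(ref_set, test_set), f_measure(ref_set, test_set))

    # string distances
    print(edit_distance("intention", "execution"))          # Levenshtein distance
    print(jaccard_distance(set("banana"), set("bandana")))

    # windowdiff compares two boundary strings ('1' marks a boundary) with window size k
    print(windowdiff("0100100", "0101000", 3))
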
env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/confusionmatrix.cpython-310.pyc
ADDED
Binary file (12 kB)

env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/distance.cpython-310.pyc
ADDED
Binary file (14.7 kB)

env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/paice.cpython-310.pyc
ADDED
Binary file (11.3 kB)