Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
See raw diff
- env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/agreement.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/aline.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/association.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/scores.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/metrics/agreement.py +465 -0
- env-llmeval/lib/python3.10/site-packages/nltk/metrics/association.py +476 -0
- env-llmeval/lib/python3.10/site-packages/nltk/metrics/distance.py +508 -0
- env-llmeval/lib/python3.10/site-packages/nltk/metrics/scores.py +228 -0
- env-llmeval/lib/python3.10/site-packages/nltk/metrics/spearman.py +68 -0
- env-llmeval/lib/python3.10/site-packages/nltk/misc/__init__.py +11 -0
- env-llmeval/lib/python3.10/site-packages/nltk/misc/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/misc/__pycache__/babelfish.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/misc/__pycache__/chomsky.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/misc/__pycache__/minimalset.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/misc/__pycache__/sort.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/misc/__pycache__/wordfinder.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/misc/babelfish.py +10 -0
- env-llmeval/lib/python3.10/site-packages/nltk/misc/chomsky.py +134 -0
- env-llmeval/lib/python3.10/site-packages/nltk/misc/minimalset.py +85 -0
- env-llmeval/lib/python3.10/site-packages/nltk/misc/sort.py +176 -0
- env-llmeval/lib/python3.10/site-packages/nltk/misc/wordfinder.py +139 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/__init__.py +34 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/api.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem2.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/cistem.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/isri.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/lancaster.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/porter.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/regexp.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/rslp.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/snowball.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/util.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/wordnet.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/api.py +27 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/isri.py +395 -0
- env-llmeval/lib/python3.10/site-packages/nltk/stem/rslp.py +137 -0
- env-llmeval/lib/python3.10/site-packages/nltk/test/__init__.py +18 -0
- env-llmeval/lib/python3.10/site-packages/nltk/test/all.py +25 -0
- env-llmeval/lib/python3.10/site-packages/nltk/test/ccg_semantics.doctest +552 -0
- env-llmeval/lib/python3.10/site-packages/nltk/test/childes.doctest +190 -0
- env-llmeval/lib/python3.10/site-packages/nltk/test/childes_fixt.py +13 -0
- env-llmeval/lib/python3.10/site-packages/nltk/test/chunk.doctest +372 -0
- env-llmeval/lib/python3.10/site-packages/nltk/test/collections.doctest +31 -0
- env-llmeval/lib/python3.10/site-packages/nltk/test/conftest.py +33 -0
- env-llmeval/lib/python3.10/site-packages/nltk/test/corpus.doctest +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/test/crubadan.doctest +65 -0
- env-llmeval/lib/python3.10/site-packages/nltk/test/dependency.doctest +241 -0
env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.31 kB)
env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/agreement.cpython-310.pyc
ADDED
Binary file (16.4 kB)
env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/aline.cpython-310.pyc
ADDED
Binary file (13.6 kB)
env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/association.cpython-310.pyc
ADDED
Binary file (15.7 kB)
env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/scores.cpython-310.pyc
ADDED
Binary file (7.58 kB)
env-llmeval/lib/python3.10/site-packages/nltk/metrics/agreement.py
ADDED
@@ -0,0 +1,465 @@
# Natural Language Toolkit: Agreement Metrics
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Tom Lippincott <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
#

"""
Implementations of inter-annotator agreement coefficients surveyed by Artstein
and Poesio (2007), Inter-Coder Agreement for Computational Linguistics.

An agreement coefficient calculates the amount that annotators agreed on label
assignments beyond what is expected by chance.

In defining the AnnotationTask class, we use naming conventions similar to the
paper's terminology. There are three types of objects in an annotation task:

    the coders (variables "c" and "C")
    the items to be annotated (variables "i" and "I")
    the potential categories to be assigned (variables "k" and "K")

Additionally, it is often the case that we don't want to treat two different
labels as complete disagreement, and so the AnnotationTask constructor can also
take a distance metric as a final argument. Distance metrics are simply
functions that take two arguments, and return a value between 0.0 and 1.0
indicating the distance between them. If not supplied, the default is binary
comparison between the arguments.

The simplest way to initialize an AnnotationTask is with a list of triples,
each containing a coder's assignment for one object in the task:

    task = AnnotationTask(data=[('c1', '1', 'v1'),('c2', '1', 'v1'),...])

Note that the data list needs to contain the same number of triples for each
individual coder, containing category values for the same set of items.

Alpha (Krippendorff 1980)
Kappa (Cohen 1960)
S (Bennet, Albert and Goldstein 1954)
Pi (Scott 1955)


TODO: Describe handling of multiple coders and missing data

Expected results from the Artstein and Poesio survey paper:

    >>> from nltk.metrics.agreement import AnnotationTask
    >>> import os.path
    >>> t = AnnotationTask(data=[x.split() for x in open(os.path.join(os.path.dirname(__file__), "artstein_poesio_example.txt"))])
    >>> t.avg_Ao()
    0.88
    >>> round(t.pi(), 5)
    0.79953
    >>> round(t.S(), 2)
    0.82

    This would have returned a wrong value (0.0) in @785fb79 as coders are in
    the wrong order. Subsequently, all values for pi(), S(), and kappa() would
    have been wrong as they are computed with avg_Ao().
    >>> t2 = AnnotationTask(data=[('b','1','stat'),('a','1','stat')])
    >>> t2.avg_Ao()
    1.0

    The following, of course, also works.
    >>> t3 = AnnotationTask(data=[('a','1','othr'),('b','1','othr')])
    >>> t3.avg_Ao()
    1.0

"""

import logging
from itertools import groupby
from operator import itemgetter

from nltk.internals import deprecated
from nltk.metrics.distance import binary_distance
from nltk.probability import ConditionalFreqDist, FreqDist

log = logging.getLogger(__name__)


class AnnotationTask:
    """Represents an annotation task, i.e. people assign labels to items.

    Notation tries to match notation in Artstein and Poesio (2007).

    In general, coders and items can be represented as any hashable object.
    Integers, for example, are fine, though strings are more readable.
    Labels must support the distance functions applied to them, so e.g.
    a string-edit-distance makes no sense if your labels are integers,
    whereas interval distance needs numeric values. A notable case of this
    is the MASI metric, which requires Python sets.
    """

    def __init__(self, data=None, distance=binary_distance):
        """Initialize an annotation task.

        The data argument can be None (to create an empty annotation task) or a sequence of 3-tuples,
        each representing a coder's labeling of an item:
        ``(coder,item,label)``

        The distance argument is a function taking two arguments (labels) and producing a numerical distance.
        The distance from a label to itself should be zero:
        ``distance(l,l) = 0``
        """
        self.distance = distance
        self.I = set()
        self.K = set()
        self.C = set()
        self.data = []
        if data is not None:
            self.load_array(data)

    def __str__(self):
        return "\r\n".join(
            map(
                lambda x: "%s\t%s\t%s"
                % (x["coder"], x["item"].replace("_", "\t"), ",".join(x["labels"])),
                self.data,
            )
        )

    def load_array(self, array):
        """Load an sequence of annotation results, appending to any data already loaded.

        The argument is a sequence of 3-tuples, each representing a coder's labeling of an item:
            (coder,item,label)
        """
        for coder, item, labels in array:
            self.C.add(coder)
            self.K.add(labels)
            self.I.add(item)
            self.data.append({"coder": coder, "labels": labels, "item": item})

    def agr(self, cA, cB, i, data=None):
        """Agreement between two coders on a given item"""
        data = data or self.data
        # cfedermann: we don't know what combination of coder/item will come
        # first in x; to avoid StopIteration problems due to assuming an order
        # cA,cB, we allow either for k1 and then look up the missing as k2.
        k1 = next(x for x in data if x["coder"] in (cA, cB) and x["item"] == i)
        if k1["coder"] == cA:
            k2 = next(x for x in data if x["coder"] == cB and x["item"] == i)
        else:
            k2 = next(x for x in data if x["coder"] == cA and x["item"] == i)

        ret = 1.0 - float(self.distance(k1["labels"], k2["labels"]))
        log.debug("Observed agreement between %s and %s on %s: %f", cA, cB, i, ret)
        log.debug(
            'Distance between "%r" and "%r": %f', k1["labels"], k2["labels"], 1.0 - ret
        )
        return ret

    def Nk(self, k):
        return float(sum(1 for x in self.data if x["labels"] == k))

    def Nik(self, i, k):
        return float(sum(1 for x in self.data if x["item"] == i and x["labels"] == k))

    def Nck(self, c, k):
        return float(sum(1 for x in self.data if x["coder"] == c and x["labels"] == k))

    @deprecated("Use Nk, Nik or Nck instead")
    def N(self, k=None, i=None, c=None):
        """Implements the "n-notation" used in Artstein and Poesio (2007)"""
        if k is not None and i is None and c is None:
            ret = self.Nk(k)
        elif k is not None and i is not None and c is None:
            ret = self.Nik(i, k)
        elif k is not None and c is not None and i is None:
            ret = self.Nck(c, k)
        else:
            raise ValueError(
                f"You must pass either i or c, not both! (k={k!r},i={i!r},c={c!r})"
            )
        log.debug("Count on N[%s,%s,%s]: %d", k, i, c, ret)
        return ret

    def _grouped_data(self, field, data=None):
        data = data or self.data
        return groupby(sorted(data, key=itemgetter(field)), itemgetter(field))

    def Ao(self, cA, cB):
        """Observed agreement between two coders on all items."""
        data = self._grouped_data(
            "item", (x for x in self.data if x["coder"] in (cA, cB))
        )
        ret = sum(self.agr(cA, cB, item, item_data) for item, item_data in data) / len(
            self.I
        )
        log.debug("Observed agreement between %s and %s: %f", cA, cB, ret)
        return ret

    def _pairwise_average(self, function):
        """
        Calculates the average of function results for each coder pair
        """
        total = 0
        n = 0
        s = self.C.copy()
        for cA in self.C:
            s.remove(cA)
            for cB in s:
                total += function(cA, cB)
                n += 1
        ret = total / n
        return ret

    def avg_Ao(self):
        """Average observed agreement across all coders and items."""
        ret = self._pairwise_average(self.Ao)
        log.debug("Average observed agreement: %f", ret)
        return ret

    def Do_Kw_pairwise(self, cA, cB, max_distance=1.0):
        """The observed disagreement for the weighted kappa coefficient."""
        total = 0.0
        data = (x for x in self.data if x["coder"] in (cA, cB))
        for i, itemdata in self._grouped_data("item", data):
            # we should have two items; distance doesn't care which comes first
            total += self.distance(next(itemdata)["labels"], next(itemdata)["labels"])

        ret = total / (len(self.I) * max_distance)
        log.debug("Observed disagreement between %s and %s: %f", cA, cB, ret)
        return ret

    def Do_Kw(self, max_distance=1.0):
        """Averaged over all labelers"""
        ret = self._pairwise_average(
            lambda cA, cB: self.Do_Kw_pairwise(cA, cB, max_distance)
        )
        log.debug("Observed disagreement: %f", ret)
        return ret

    # Agreement Coefficients
    def S(self):
        """Bennett, Albert and Goldstein 1954"""
        Ae = 1.0 / len(self.K)
        ret = (self.avg_Ao() - Ae) / (1.0 - Ae)
        return ret

    def pi(self):
        """Scott 1955; here, multi-pi.
        Equivalent to K from Siegel and Castellan (1988).

        """
        total = 0.0
        label_freqs = FreqDist(x["labels"] for x in self.data)
        for k, f in label_freqs.items():
            total += f**2
        Ae = total / ((len(self.I) * len(self.C)) ** 2)
        return (self.avg_Ao() - Ae) / (1 - Ae)

    def Ae_kappa(self, cA, cB):
        Ae = 0.0
        nitems = float(len(self.I))
        label_freqs = ConditionalFreqDist((x["labels"], x["coder"]) for x in self.data)
        for k in label_freqs.conditions():
            Ae += (label_freqs[k][cA] / nitems) * (label_freqs[k][cB] / nitems)
        return Ae

    def kappa_pairwise(self, cA, cB):
        """ """
        Ae = self.Ae_kappa(cA, cB)
        ret = (self.Ao(cA, cB) - Ae) / (1.0 - Ae)
        log.debug("Expected agreement between %s and %s: %f", cA, cB, Ae)
        return ret

    def kappa(self):
        """Cohen 1960
        Averages naively over kappas for each coder pair.

        """
        return self._pairwise_average(self.kappa_pairwise)

    def multi_kappa(self):
        """Davies and Fleiss 1982
        Averages over observed and expected agreements for each coder pair.

        """
        Ae = self._pairwise_average(self.Ae_kappa)
        return (self.avg_Ao() - Ae) / (1.0 - Ae)

    def Disagreement(self, label_freqs):
        total_labels = sum(label_freqs.values())
        pairs = 0.0
        for j, nj in label_freqs.items():
            for l, nl in label_freqs.items():
                pairs += float(nj * nl) * self.distance(l, j)
        return 1.0 * pairs / (total_labels * (total_labels - 1))

    def alpha(self):
        """Krippendorff 1980"""
        # check for degenerate cases
        if len(self.K) == 0:
            raise ValueError("Cannot calculate alpha, no data present!")
        if len(self.K) == 1:
            log.debug("Only one annotation value, alpha returning 1.")
            return 1
        if len(self.C) == 1 and len(self.I) == 1:
            raise ValueError("Cannot calculate alpha, only one coder and item present!")

        total_disagreement = 0.0
        total_ratings = 0
        all_valid_labels_freq = FreqDist([])

        total_do = 0.0  # Total observed disagreement for all items.
        for i, itemdata in self._grouped_data("item"):
            label_freqs = FreqDist(x["labels"] for x in itemdata)
            labels_count = sum(label_freqs.values())
            if labels_count < 2:
                # Ignore the item.
                continue
            all_valid_labels_freq += label_freqs
            total_do += self.Disagreement(label_freqs) * labels_count

        do = total_do / sum(all_valid_labels_freq.values())

        de = self.Disagreement(all_valid_labels_freq)  # Expected disagreement.
        k_alpha = 1.0 - do / de

        return k_alpha

    def weighted_kappa_pairwise(self, cA, cB, max_distance=1.0):
        """Cohen 1968"""
        total = 0.0
        label_freqs = ConditionalFreqDist(
            (x["coder"], x["labels"]) for x in self.data if x["coder"] in (cA, cB)
        )
        for j in self.K:
            for l in self.K:
                total += label_freqs[cA][j] * label_freqs[cB][l] * self.distance(j, l)
        De = total / (max_distance * pow(len(self.I), 2))
        log.debug("Expected disagreement between %s and %s: %f", cA, cB, De)
        Do = self.Do_Kw_pairwise(cA, cB)
        ret = 1.0 - (Do / De)
        return ret

    def weighted_kappa(self, max_distance=1.0):
        """Cohen 1968"""
        return self._pairwise_average(
            lambda cA, cB: self.weighted_kappa_pairwise(cA, cB, max_distance)
        )


if __name__ == "__main__":

    import optparse
    import re

    from nltk.metrics import distance

    # process command-line arguments
    parser = optparse.OptionParser()
    parser.add_option(
        "-d",
        "--distance",
        dest="distance",
        default="binary_distance",
        help="distance metric to use",
    )
    parser.add_option(
        "-a",
        "--agreement",
        dest="agreement",
        default="kappa",
        help="agreement coefficient to calculate",
    )
    parser.add_option(
        "-e",
        "--exclude",
        dest="exclude",
        action="append",
        default=[],
        help="coder names to exclude (may be specified multiple times)",
    )
    parser.add_option(
        "-i",
        "--include",
        dest="include",
        action="append",
        default=[],
        help="coder names to include, same format as exclude",
    )
    parser.add_option(
        "-f",
        "--file",
        dest="file",
        help="file to read labelings from, each line with three columns: 'labeler item labels'",
    )
    parser.add_option(
        "-v",
        "--verbose",
        dest="verbose",
        default="0",
        help="how much debugging to print on stderr (0-4)",
    )
    parser.add_option(
        "-c",
        "--columnsep",
        dest="columnsep",
        default="\t",
        help="char/string that separates the three columns in the file, defaults to tab",
    )
    parser.add_option(
        "-l",
        "--labelsep",
        dest="labelsep",
        default=",",
        help="char/string that separates labels (if labelers can assign more than one), defaults to comma",
    )
    parser.add_option(
        "-p",
        "--presence",
        dest="presence",
        default=None,
        help="convert each labeling into 1 or 0, based on presence of LABEL",
    )
    parser.add_option(
        "-T",
        "--thorough",
        dest="thorough",
        default=False,
        action="store_true",
        help="calculate agreement for every subset of the annotators",
    )
    (options, remainder) = parser.parse_args()

    if not options.file:
        parser.print_help()
        exit()

    logging.basicConfig(level=50 - 10 * int(options.verbose))

    # read in data from the specified file
    data = []
    with open(options.file) as infile:
        for l in infile:
            toks = l.split(options.columnsep)
            coder, object_, labels = (
                toks[0],
                str(toks[1:-1]),
                frozenset(toks[-1].strip().split(options.labelsep)),
            )
            if (
                (options.include == options.exclude)
                or (len(options.include) > 0 and coder in options.include)
                or (len(options.exclude) > 0 and coder not in options.exclude)
            ):
                data.append((coder, object_, labels))

    if options.presence:
        task = AnnotationTask(
            data, getattr(distance, options.distance)(options.presence)
        )
    else:
        task = AnnotationTask(data, getattr(distance, options.distance))

    if options.thorough:
        pass
    else:
        print(getattr(task, options.agreement)())

    logging.shutdown()
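The AnnotationTask class above takes (coder, item, label) triples plus an optional label-distance function, so the coefficients can be sanity-checked on a small hand-built dataset. The sketch below is not part of the diff; the coder names, item names, and labels are invented for illustration, and it assumes the package is importable as nltk.metrics.agreement, as the file path suggests.

```python
# Illustrative only: three hypothetical coders label five hypothetical items.
from nltk.metrics.agreement import AnnotationTask

data = [
    ("c1", "item1", "pos"), ("c2", "item1", "pos"), ("c3", "item1", "pos"),
    ("c1", "item2", "pos"), ("c2", "item2", "neg"), ("c3", "item2", "pos"),
    ("c1", "item3", "neg"), ("c2", "item3", "neg"), ("c3", "item3", "neg"),
    ("c1", "item4", "pos"), ("c2", "item4", "pos"), ("c3", "item4", "neg"),
    ("c1", "item5", "neg"), ("c2", "item5", "neg"), ("c3", "item5", "pos"),
]

task = AnnotationTask(data)  # default label distance is binary_distance

print("avg observed agreement:", task.avg_Ao())
print("S (Bennett et al.):", task.S())
print("pi (Scott):", task.pi())
print("kappa (Cohen):", task.kappa())
print("alpha (Krippendorff):", task.alpha())
```

With the default binary distance, S, pi, and kappa share the same observed agreement and differ only in how expected (chance) agreement is estimated, which is what a small example like this makes visible.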
env-llmeval/lib/python3.10/site-packages/nltk/metrics/association.py
ADDED
@@ -0,0 +1,476 @@
# Natural Language Toolkit: Ngram Association Measures
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Joel Nothman <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
Provides scoring functions for a number of association measures through a
generic, abstract implementation in ``NgramAssocMeasures``, and n-specific
``BigramAssocMeasures`` and ``TrigramAssocMeasures``.
"""

import math as _math
from abc import ABCMeta, abstractmethod
from functools import reduce

_log2 = lambda x: _math.log2(x)
_ln = _math.log

_product = lambda s: reduce(lambda x, y: x * y, s)

_SMALL = 1e-20

try:
    from scipy.stats import fisher_exact
except ImportError:

    def fisher_exact(*_args, **_kwargs):
        raise NotImplementedError


### Indices to marginals arguments:

NGRAM = 0
"""Marginals index for the ngram count"""

UNIGRAMS = -2
"""Marginals index for a tuple of each unigram count"""

TOTAL = -1
"""Marginals index for the number of words in the data"""


class NgramAssocMeasures(metaclass=ABCMeta):
    """
    An abstract class defining a collection of generic association measures.
    Each public method returns a score, taking the following arguments::

        score_fn(count_of_ngram,
                 (count_of_n-1gram_1, ..., count_of_n-1gram_j),
                 (count_of_n-2gram_1, ..., count_of_n-2gram_k),
                 ...,
                 (count_of_1gram_1, ..., count_of_1gram_n),
                 count_of_total_words)

    See ``BigramAssocMeasures`` and ``TrigramAssocMeasures``

    Inheriting classes should define a property _n, and a method _contingency
    which calculates contingency values from marginals in order for all
    association measures defined here to be usable.
    """

    _n = 0

    @staticmethod
    @abstractmethod
    def _contingency(*marginals):
        """Calculates values of a contingency table from marginal values."""
        raise NotImplementedError(
            "The contingency table is not available" "in the general ngram case"
        )

    @staticmethod
    @abstractmethod
    def _marginals(*contingency):
        """Calculates values of contingency table marginals from its values."""
        raise NotImplementedError(
            "The contingency table is not available" "in the general ngram case"
        )

    @classmethod
    def _expected_values(cls, cont):
        """Calculates expected values for a contingency table."""
        n_all = sum(cont)
        bits = [1 << i for i in range(cls._n)]

        # For each contingency table cell
        for i in range(len(cont)):
            # Yield the expected value
            yield (
                _product(
                    sum(cont[x] for x in range(2**cls._n) if (x & j) == (i & j))
                    for j in bits
                )
                / (n_all ** (cls._n - 1))
            )

    @staticmethod
    def raw_freq(*marginals):
        """Scores ngrams by their frequency"""
        return marginals[NGRAM] / marginals[TOTAL]

    @classmethod
    def student_t(cls, *marginals):
        """Scores ngrams using Student's t test with independence hypothesis
        for unigrams, as in Manning and Schutze 5.3.1.
        """
        return (
            marginals[NGRAM]
            - _product(marginals[UNIGRAMS]) / (marginals[TOTAL] ** (cls._n - 1))
        ) / (marginals[NGRAM] + _SMALL) ** 0.5

    @classmethod
    def chi_sq(cls, *marginals):
        """Scores ngrams using Pearson's chi-square as in Manning and Schutze
        5.3.3.
        """
        cont = cls._contingency(*marginals)
        exps = cls._expected_values(cont)
        return sum((obs - exp) ** 2 / (exp + _SMALL) for obs, exp in zip(cont, exps))

    @staticmethod
    def mi_like(*marginals, **kwargs):
        """Scores ngrams using a variant of mutual information. The keyword
        argument power sets an exponent (default 3) for the numerator. No
        logarithm of the result is calculated.
        """
        return marginals[NGRAM] ** kwargs.get("power", 3) / _product(
            marginals[UNIGRAMS]
        )

    @classmethod
    def pmi(cls, *marginals):
        """Scores ngrams by pointwise mutual information, as in Manning and
        Schutze 5.4.
        """
        return _log2(marginals[NGRAM] * marginals[TOTAL] ** (cls._n - 1)) - _log2(
            _product(marginals[UNIGRAMS])
        )

    @classmethod
    def likelihood_ratio(cls, *marginals):
        """Scores ngrams using likelihood ratios as in Manning and Schutze 5.3.4."""
        cont = cls._contingency(*marginals)
        return 2 * sum(
            obs * _ln(obs / (exp + _SMALL) + _SMALL)
            for obs, exp in zip(cont, cls._expected_values(cont))
        )

    @classmethod
    def poisson_stirling(cls, *marginals):
        """Scores ngrams using the Poisson-Stirling measure."""
        exp = _product(marginals[UNIGRAMS]) / (marginals[TOTAL] ** (cls._n - 1))
        return marginals[NGRAM] * (_log2(marginals[NGRAM] / exp) - 1)

    @classmethod
    def jaccard(cls, *marginals):
        """Scores ngrams using the Jaccard index."""
        cont = cls._contingency(*marginals)
        return cont[0] / sum(cont[:-1])


class BigramAssocMeasures(NgramAssocMeasures):
    """
    A collection of bigram association measures. Each association measure
    is provided as a function with three arguments::

        bigram_score_fn(n_ii, (n_ix, n_xi), n_xx)

    The arguments constitute the marginals of a contingency table, counting
    the occurrences of particular events in a corpus. The letter i in the
    suffix refers to the appearance of the word in question, while x indicates
    the appearance of any word. Thus, for example:

    - n_ii counts ``(w1, w2)``, i.e. the bigram being scored
    - n_ix counts ``(w1, *)``
    - n_xi counts ``(*, w2)``
    - n_xx counts ``(*, *)``, i.e. any bigram

    This may be shown with respect to a contingency table::

                w1    ~w1
             ------ ------
         w2 | n_ii | n_oi | = n_xi
             ------ ------
        ~w2 | n_io | n_oo |
             ------ ------
             = n_ix        TOTAL = n_xx
    """

    _n = 2

    @staticmethod
    def _contingency(n_ii, n_ix_xi_tuple, n_xx):
        """Calculates values of a bigram contingency table from marginal values."""
        (n_ix, n_xi) = n_ix_xi_tuple
        n_oi = n_xi - n_ii
        n_io = n_ix - n_ii
        return (n_ii, n_oi, n_io, n_xx - n_ii - n_oi - n_io)

    @staticmethod
    def _marginals(n_ii, n_oi, n_io, n_oo):
        """Calculates values of contingency table marginals from its values."""
        return (n_ii, (n_oi + n_ii, n_io + n_ii), n_oo + n_oi + n_io + n_ii)

    @staticmethod
    def _expected_values(cont):
        """Calculates expected values for a contingency table."""
        n_xx = sum(cont)
        # For each contingency table cell
        for i in range(4):
            yield (cont[i] + cont[i ^ 1]) * (cont[i] + cont[i ^ 2]) / n_xx

    @classmethod
    def phi_sq(cls, *marginals):
        """Scores bigrams using phi-square, the square of the Pearson correlation
        coefficient.
        """
        n_ii, n_io, n_oi, n_oo = cls._contingency(*marginals)

        return (n_ii * n_oo - n_io * n_oi) ** 2 / (
            (n_ii + n_io) * (n_ii + n_oi) * (n_io + n_oo) * (n_oi + n_oo)
        )

    @classmethod
    def chi_sq(cls, n_ii, n_ix_xi_tuple, n_xx):
        """Scores bigrams using chi-square, i.e. phi-sq multiplied by the number
        of bigrams, as in Manning and Schutze 5.3.3.
        """
        (n_ix, n_xi) = n_ix_xi_tuple
        return n_xx * cls.phi_sq(n_ii, (n_ix, n_xi), n_xx)

    @classmethod
    def fisher(cls, *marginals):
        """Scores bigrams using Fisher's Exact Test (Pedersen 1996). Less
        sensitive to small counts than PMI or Chi Sq, but also more expensive
        to compute. Requires scipy.
        """

        n_ii, n_io, n_oi, n_oo = cls._contingency(*marginals)

        (odds, pvalue) = fisher_exact([[n_ii, n_io], [n_oi, n_oo]], alternative="less")
        return pvalue

    @staticmethod
    def dice(n_ii, n_ix_xi_tuple, n_xx):
        """Scores bigrams using Dice's coefficient."""
        (n_ix, n_xi) = n_ix_xi_tuple
        return 2 * n_ii / (n_ix + n_xi)


class TrigramAssocMeasures(NgramAssocMeasures):
    """
    A collection of trigram association measures. Each association measure
    is provided as a function with four arguments::

        trigram_score_fn(n_iii,
                         (n_iix, n_ixi, n_xii),
                         (n_ixx, n_xix, n_xxi),
                         n_xxx)

    The arguments constitute the marginals of a contingency table, counting
    the occurrences of particular events in a corpus. The letter i in the
    suffix refers to the appearance of the word in question, while x indicates
    the appearance of any word. Thus, for example:

    - n_iii counts ``(w1, w2, w3)``, i.e. the trigram being scored
    - n_ixx counts ``(w1, *, *)``
    - n_xxx counts ``(*, *, *)``, i.e. any trigram
    """

    _n = 3

    @staticmethod
    def _contingency(n_iii, n_iix_tuple, n_ixx_tuple, n_xxx):
        """Calculates values of a trigram contingency table (or cube) from
        marginal values.
        >>> TrigramAssocMeasures._contingency(1, (1, 1, 1), (1, 73, 1), 2000)
        (1, 0, 0, 0, 0, 72, 0, 1927)
        """
        (n_iix, n_ixi, n_xii) = n_iix_tuple
        (n_ixx, n_xix, n_xxi) = n_ixx_tuple
        n_oii = n_xii - n_iii
        n_ioi = n_ixi - n_iii
        n_iio = n_iix - n_iii
        n_ooi = n_xxi - n_iii - n_oii - n_ioi
        n_oio = n_xix - n_iii - n_oii - n_iio
        n_ioo = n_ixx - n_iii - n_ioi - n_iio
        n_ooo = n_xxx - n_iii - n_oii - n_ioi - n_iio - n_ooi - n_oio - n_ioo

        return (n_iii, n_oii, n_ioi, n_ooi, n_iio, n_oio, n_ioo, n_ooo)

    @staticmethod
    def _marginals(*contingency):
        """Calculates values of contingency table marginals from its values.
        >>> TrigramAssocMeasures._marginals(1, 0, 0, 0, 0, 72, 0, 1927)
        (1, (1, 1, 1), (1, 73, 1), 2000)
        """
        n_iii, n_oii, n_ioi, n_ooi, n_iio, n_oio, n_ioo, n_ooo = contingency
        return (
            n_iii,
            (n_iii + n_iio, n_iii + n_ioi, n_iii + n_oii),
            (
                n_iii + n_ioi + n_iio + n_ioo,
                n_iii + n_oii + n_iio + n_oio,
                n_iii + n_oii + n_ioi + n_ooi,
            ),
            sum(contingency),
        )


class QuadgramAssocMeasures(NgramAssocMeasures):
    """
    A collection of quadgram association measures. Each association measure
    is provided as a function with five arguments::

        trigram_score_fn(n_iiii,
                        (n_iiix, n_iixi, n_ixii, n_xiii),
                        (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix),
                        (n_ixxx, n_xixx, n_xxix, n_xxxi),
                        n_all)

    The arguments constitute the marginals of a contingency table, counting
    the occurrences of particular events in a corpus. The letter i in the
    suffix refers to the appearance of the word in question, while x indicates
    the appearance of any word. Thus, for example:

    - n_iiii counts ``(w1, w2, w3, w4)``, i.e. the quadgram being scored
    - n_ixxi counts ``(w1, *, *, w4)``
    - n_xxxx counts ``(*, *, *, *)``, i.e. any quadgram
    """

    _n = 4

    @staticmethod
    def _contingency(n_iiii, n_iiix_tuple, n_iixx_tuple, n_ixxx_tuple, n_xxxx):
        """Calculates values of a quadgram contingency table from
        marginal values.
        """
        (n_iiix, n_iixi, n_ixii, n_xiii) = n_iiix_tuple
        (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix) = n_iixx_tuple
        (n_ixxx, n_xixx, n_xxix, n_xxxi) = n_ixxx_tuple
        n_oiii = n_xiii - n_iiii
        n_ioii = n_ixii - n_iiii
        n_iioi = n_iixi - n_iiii
        n_ooii = n_xxii - n_iiii - n_oiii - n_ioii
        n_oioi = n_xixi - n_iiii - n_oiii - n_iioi
        n_iooi = n_ixxi - n_iiii - n_ioii - n_iioi
        n_oooi = n_xxxi - n_iiii - n_oiii - n_ioii - n_iioi - n_ooii - n_iooi - n_oioi
        n_iiio = n_iiix - n_iiii
        n_oiio = n_xiix - n_iiii - n_oiii - n_iiio
        n_ioio = n_ixix - n_iiii - n_ioii - n_iiio
        n_ooio = n_xxix - n_iiii - n_oiii - n_ioii - n_iiio - n_ooii - n_ioio - n_oiio
        n_iioo = n_iixx - n_iiii - n_iioi - n_iiio
        n_oioo = n_xixx - n_iiii - n_oiii - n_iioi - n_iiio - n_oioi - n_oiio - n_iioo
        n_iooo = n_ixxx - n_iiii - n_ioii - n_iioi - n_iiio - n_iooi - n_iioo - n_ioio
        n_oooo = (
            n_xxxx
            - n_iiii
            - n_oiii
            - n_ioii
            - n_iioi
            - n_ooii
            - n_oioi
            - n_iooi
            - n_oooi
            - n_iiio
            - n_oiio
            - n_ioio
            - n_ooio
            - n_iioo
            - n_oioo
            - n_iooo
        )

        return (
            n_iiii,
            n_oiii,
            n_ioii,
            n_ooii,
            n_iioi,
            n_oioi,
            n_iooi,
            n_oooi,
            n_iiio,
            n_oiio,
            n_ioio,
            n_ooio,
            n_iioo,
            n_oioo,
            n_iooo,
            n_oooo,
        )

    @staticmethod
    def _marginals(*contingency):
        """Calculates values of contingency table marginals from its values.
        QuadgramAssocMeasures._marginals(1, 0, 2, 46, 552, 825, 2577, 34967, 1, 0, 2, 48, 7250, 9031, 28585, 356653)
        (1, (2, 553, 3, 1), (7804, 6, 3132, 1378, 49, 2), (38970, 17660, 100, 38970), 440540)
        """
        (
            n_iiii,
            n_oiii,
            n_ioii,
            n_ooii,
            n_iioi,
            n_oioi,
            n_iooi,
            n_oooi,
            n_iiio,
            n_oiio,
            n_ioio,
            n_ooio,
            n_iioo,
            n_oioo,
            n_iooo,
            n_oooo,
        ) = contingency

        n_iiix = n_iiii + n_iiio
        n_iixi = n_iiii + n_iioi
        n_ixii = n_iiii + n_ioii
        n_xiii = n_iiii + n_oiii

        n_iixx = n_iiii + n_iioi + n_iiio + n_iioo
        n_ixix = n_iiii + n_ioii + n_iiio + n_ioio
        n_ixxi = n_iiii + n_ioii + n_iioi + n_iooi
        n_xixi = n_iiii + n_oiii + n_iioi + n_oioi
        n_xxii = n_iiii + n_oiii + n_ioii + n_ooii
        n_xiix = n_iiii + n_oiii + n_iiio + n_oiio

        n_ixxx = n_iiii + n_ioii + n_iioi + n_iiio + n_iooi + n_iioo + n_ioio + n_iooo
        n_xixx = n_iiii + n_oiii + n_iioi + n_iiio + n_oioi + n_oiio + n_iioo + n_oioo
        n_xxix = n_iiii + n_oiii + n_ioii + n_iiio + n_ooii + n_ioio + n_oiio + n_ooio
        n_xxxi = n_iiii + n_oiii + n_ioii + n_iioi + n_ooii + n_iooi + n_oioi + n_oooi

        n_all = sum(contingency)

        return (
            n_iiii,
            (n_iiix, n_iixi, n_ixii, n_xiii),
            (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix),
            (n_ixxx, n_xixx, n_xxix, n_xxxi),
            n_all,
        )


class ContingencyMeasures:
    """Wraps NgramAssocMeasures classes such that the arguments of association
    measures are contingency table values rather than marginals.
    """

    def __init__(self, measures):
        """Constructs a ContingencyMeasures given a NgramAssocMeasures class"""
        self.__class__.__name__ = "Contingency" + measures.__class__.__name__
        for k in dir(measures):
            if k.startswith("__"):
                continue
            v = getattr(measures, k)
            if not k.startswith("_"):
                v = self._make_contingency_fn(measures, v)
            setattr(self, k, v)

    @staticmethod
    def _make_contingency_fn(measures, old_fn):
        """From an association measure function, produces a new function which
        accepts contingency table values as its arguments.
        """

        def res(*contingency):
            return old_fn(*measures._marginals(*contingency))

        res.__doc__ = old_fn.__doc__
        res.__name__ = old_fn.__name__
        return res
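The marginals convention documented in BigramAssocMeasures (n_ii, (n_ix, n_xi), n_xx) can be exercised directly, without a collocation finder. The sketch below is not part of the diff; the counts are invented for illustration and the import assumes the module is available as nltk.metrics.association, as the file path suggests.

```python
# Illustrative only: score one bigram from made-up corpus counts.
from nltk.metrics.association import BigramAssocMeasures

n_ii = 20               # count of the bigram (w1, w2), e.g. a frequent pair
n_ix, n_xi = 1000, 25   # counts of (w1, *) and (*, w2)
n_xx = 1_000_000        # total number of bigrams considered

print("raw freq:", BigramAssocMeasures.raw_freq(n_ii, (n_ix, n_xi), n_xx))
print("PMI:", BigramAssocMeasures.pmi(n_ii, (n_ix, n_xi), n_xx))
print("chi-square:", BigramAssocMeasures.chi_sq(n_ii, (n_ix, n_xi), n_xx))
print("likelihood ratio:", BigramAssocMeasures.likelihood_ratio(n_ii, (n_ix, n_xi), n_xx))
```

If the four contingency cells are available instead of the marginals, ContingencyMeasures(BigramAssocMeasures()) wraps the same scoring functions so they accept (n_ii, n_oi, n_io, n_oo) directly.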
env-llmeval/lib/python3.10/site-packages/nltk/metrics/distance.py
ADDED
@@ -0,0 +1,508 @@
1 |
+
# Natural Language Toolkit: Distance Metrics
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Edward Loper <[email protected]>
|
5 |
+
# Steven Bird <[email protected]>
|
6 |
+
# Tom Lippincott <[email protected]>
|
7 |
+
# URL: <https://www.nltk.org/>
|
8 |
+
# For license information, see LICENSE.TXT
|
9 |
+
#
|
10 |
+
|
11 |
+
"""
|
12 |
+
Distance Metrics.
|
13 |
+
|
14 |
+
Compute the distance between two items (usually strings).
|
15 |
+
As metrics, they must satisfy the following three requirements:
|
16 |
+
|
17 |
+
1. d(a, a) = 0
|
18 |
+
2. d(a, b) >= 0
|
19 |
+
3. d(a, c) <= d(a, b) + d(b, c)
|
20 |
+
"""
|
21 |
+
|
22 |
+
import operator
|
23 |
+
import warnings
|
24 |
+
|
25 |
+
|
26 |
+
def _edit_dist_init(len1, len2):
|
27 |
+
lev = []
|
28 |
+
for i in range(len1):
|
29 |
+
lev.append([0] * len2) # initialize 2D array to zero
|
30 |
+
for i in range(len1):
|
31 |
+
lev[i][0] = i # column 0: 0,1,2,3,4,...
|
32 |
+
for j in range(len2):
|
33 |
+
lev[0][j] = j # row 0: 0,1,2,3,4,...
|
34 |
+
return lev
|
35 |
+
|
36 |
+
|
37 |
+
def _last_left_t_init(sigma):
|
38 |
+
return {c: 0 for c in sigma}
|
39 |
+
|
40 |
+
|
41 |
+
def _edit_dist_step(
|
42 |
+
lev, i, j, s1, s2, last_left, last_right, substitution_cost=1, transpositions=False
|
43 |
+
):
|
44 |
+
c1 = s1[i - 1]
|
45 |
+
c2 = s2[j - 1]
|
46 |
+
|
47 |
+
# skipping a character in s1
|
48 |
+
a = lev[i - 1][j] + 1
|
49 |
+
# skipping a character in s2
|
50 |
+
b = lev[i][j - 1] + 1
|
51 |
+
# substitution
|
52 |
+
c = lev[i - 1][j - 1] + (substitution_cost if c1 != c2 else 0)
|
53 |
+
|
54 |
+
# transposition
|
55 |
+
d = c + 1 # never picked by default
|
56 |
+
if transpositions and last_left > 0 and last_right > 0:
|
57 |
+
d = lev[last_left - 1][last_right - 1] + i - last_left + j - last_right - 1
|
58 |
+
|
59 |
+
# pick the cheapest
|
60 |
+
lev[i][j] = min(a, b, c, d)
|
61 |
+
|
62 |
+
|
63 |
+
def edit_distance(s1, s2, substitution_cost=1, transpositions=False):
|
64 |
+
"""
|
65 |
+
Calculate the Levenshtein edit-distance between two strings.
|
66 |
+
The edit distance is the number of characters that need to be
|
67 |
+
substituted, inserted, or deleted, to transform s1 into s2. For
|
68 |
+
example, transforming "rain" to "shine" requires three steps,
|
69 |
+
consisting of two substitutions and one insertion:
|
70 |
+
"rain" -> "sain" -> "shin" -> "shine". These operations could have
|
71 |
+
been done in other orders, but at least three steps are needed.
|
72 |
+
|
73 |
+
Allows specifying the cost of substitution edits (e.g., "a" -> "b"),
|
74 |
+
because sometimes it makes sense to assign greater penalties to
|
75 |
+
substitutions.
|
76 |
+
|
77 |
+
This also optionally allows transposition edits (e.g., "ab" -> "ba"),
|
78 |
+
though this is disabled by default.
|
79 |
+
|
80 |
+
:param s1, s2: The strings to be analysed
|
81 |
+
:param transpositions: Whether to allow transposition edits
|
82 |
+
:type s1: str
|
83 |
+
:type s2: str
|
84 |
+
:type substitution_cost: int
|
85 |
+
:type transpositions: bool
|
86 |
+
:rtype: int
|
87 |
+
"""
|
88 |
+
# set up a 2-D array
|
89 |
+
len1 = len(s1)
|
90 |
+
len2 = len(s2)
|
91 |
+
lev = _edit_dist_init(len1 + 1, len2 + 1)
|
92 |
+
|
93 |
+
# retrieve alphabet
|
94 |
+
sigma = set()
|
95 |
+
sigma.update(s1)
|
96 |
+
sigma.update(s2)
|
97 |
+
|
98 |
+
# set up table to remember positions of last seen occurrence in s1
|
99 |
+
last_left_t = _last_left_t_init(sigma)
|
100 |
+
|
101 |
+
# iterate over the array
|
102 |
+
# i and j start from 1 and not 0 to stay close to the wikipedia pseudo-code
|
103 |
+
# see https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance
|
104 |
+
for i in range(1, len1 + 1):
|
105 |
+
last_right_buf = 0
|
106 |
+
for j in range(1, len2 + 1):
|
107 |
+
last_left = last_left_t[s2[j - 1]]
|
108 |
+
last_right = last_right_buf
|
109 |
+
if s1[i - 1] == s2[j - 1]:
|
110 |
+
last_right_buf = j
|
111 |
+
_edit_dist_step(
|
112 |
+
lev,
|
113 |
+
i,
|
114 |
+
j,
|
115 |
+
s1,
|
116 |
+
s2,
|
117 |
+
last_left,
|
118 |
+
last_right,
|
119 |
+
substitution_cost=substitution_cost,
|
120 |
+
transpositions=transpositions,
|
121 |
+
)
|
122 |
+
last_left_t[s1[i - 1]] = i
|
123 |
+
return lev[len1][len2]
|
124 |
+
|
125 |
+
|
126 |
+
def _edit_dist_backtrace(lev):
|
127 |
+
i, j = len(lev) - 1, len(lev[0]) - 1
|
128 |
+
alignment = [(i, j)]
|
129 |
+
|
130 |
+
while (i, j) != (0, 0):
|
131 |
+
directions = [
|
132 |
+
(i - 1, j - 1), # substitution
|
133 |
+
(i - 1, j), # skip s1
|
134 |
+
(i, j - 1), # skip s2
|
135 |
+
]
|
136 |
+
|
137 |
+
direction_costs = (
|
138 |
+
(lev[i][j] if (i >= 0 and j >= 0) else float("inf"), (i, j))
|
139 |
+
for i, j in directions
|
140 |
+
)
|
141 |
+
_, (i, j) = min(direction_costs, key=operator.itemgetter(0))
|
142 |
+
|
143 |
+
alignment.append((i, j))
|
144 |
+
return list(reversed(alignment))
|
145 |
+
|
146 |
+
|
147 |
+
def edit_distance_align(s1, s2, substitution_cost=1):
|
148 |
+
"""
|
149 |
+
Calculate the minimum Levenshtein edit-distance based alignment
|
150 |
+
mapping between two strings. The alignment finds the mapping
|
151 |
+
from string s1 to s2 that minimizes the edit distance cost.
|
152 |
+
For example, mapping "rain" to "shine" would involve 2
|
153 |
+
substitutions, 2 matches and an insertion resulting in
|
154 |
+
the following mapping:
|
155 |
+
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (4, 5)]
|
156 |
+
NB: (0, 0) is the start state without any letters associated
|
157 |
+
See more: https://web.stanford.edu/class/cs124/lec/med.pdf
|
158 |
+
|
159 |
+
In case of multiple valid minimum-distance alignments, the
|
160 |
+
backtrace has the following operation precedence:
|
161 |
+
|
162 |
+
1. Substitute s1 and s2 characters
|
163 |
+
2. Skip s1 character
|
164 |
+
3. Skip s2 character
|
165 |
+
|
166 |
+
The backtrace is carried out in reverse string order.
|
167 |
+
|
168 |
+
This function does not support transposition.
|
169 |
+
|
170 |
+
:param s1, s2: The strings to be aligned
|
171 |
+
:type s1: str
|
172 |
+
:type s2: str
|
173 |
+
:type substitution_cost: int
|
174 |
+
:rtype: List[Tuple(int, int)]
|
175 |
+
"""
|
176 |
+
# set up a 2-D array
|
177 |
+
len1 = len(s1)
|
178 |
+
len2 = len(s2)
|
179 |
+
lev = _edit_dist_init(len1 + 1, len2 + 1)
|
180 |
+
|
181 |
+
# iterate over the array
|
182 |
+
for i in range(len1):
|
183 |
+
for j in range(len2):
|
184 |
+
_edit_dist_step(
|
185 |
+
lev,
|
186 |
+
i + 1,
|
187 |
+
j + 1,
|
188 |
+
s1,
|
189 |
+
s2,
|
190 |
+
0,
|
191 |
+
0,
|
192 |
+
substitution_cost=substitution_cost,
|
193 |
+
transpositions=False,
|
194 |
+
)
|
195 |
+
|
196 |
+
# backtrace to find alignment
|
197 |
+
alignment = _edit_dist_backtrace(lev)
|
198 |
+
return alignment
|
199 |
+
|
200 |
+
|
201 |
+
def binary_distance(label1, label2):
|
202 |
+
"""Simple equality test.
|
203 |
+
|
204 |
+
0.0 if the labels are identical, 1.0 if they are different.
|
205 |
+
|
206 |
+
>>> from nltk.metrics import binary_distance
|
207 |
+
>>> binary_distance(1,1)
|
208 |
+
0.0
|
209 |
+
|
210 |
+
    >>> binary_distance(1,3)
    1.0
    """

    return 0.0 if label1 == label2 else 1.0


def jaccard_distance(label1, label2):
    """Distance metric comparing set-similarity."""
    return (len(label1.union(label2)) - len(label1.intersection(label2))) / len(
        label1.union(label2)
    )


def masi_distance(label1, label2):
    """Distance metric that takes into account partial agreement when multiple
    labels are assigned.

    >>> from nltk.metrics import masi_distance
    >>> masi_distance(set([1, 2]), set([1, 2, 3, 4]))
    0.665

    Passonneau 2006, Measuring Agreement on Set-Valued Items (MASI)
    for Semantic and Pragmatic Annotation.
    """

    len_intersection = len(label1.intersection(label2))
    len_union = len(label1.union(label2))
    len_label1 = len(label1)
    len_label2 = len(label2)
    if len_label1 == len_label2 and len_label1 == len_intersection:
        m = 1
    elif len_intersection == min(len_label1, len_label2):
        m = 0.67
    elif len_intersection > 0:
        m = 0.33
    else:
        m = 0

    return 1 - len_intersection / len_union * m


def interval_distance(label1, label2):
    """Krippendorff's interval distance metric

    >>> from nltk.metrics import interval_distance
    >>> interval_distance(1,10)
    81

    Krippendorff 1980, Content Analysis: An Introduction to its Methodology
    """

    try:
        return pow(label1 - label2, 2)
    #        return pow(list(label1)[0]-list(label2)[0],2)
    except:
        print("non-numeric labels not supported with interval distance")


def presence(label):
    """Higher-order function to test presence of a given label"""

    return lambda x, y: 1.0 * ((label in x) == (label in y))


def fractional_presence(label):
    return (
        lambda x, y: abs((1.0 / len(x)) - (1.0 / len(y))) * (label in x and label in y)
        or 0.0 * (label not in x and label not in y)
        or abs(1.0 / len(x)) * (label in x and label not in y)
        or (1.0 / len(y)) * (label not in x and label in y)
    )


def custom_distance(file):
    data = {}
    with open(file) as infile:
        for l in infile:
            labelA, labelB, dist = l.strip().split("\t")
            labelA = frozenset([labelA])
            labelB = frozenset([labelB])
            data[frozenset([labelA, labelB])] = float(dist)
    return lambda x, y: data[frozenset([x, y])]


def jaro_similarity(s1, s2):
    """
    Computes the Jaro similarity between 2 sequences from:

        Matthew A. Jaro (1989). Advances in record linkage methodology
        as applied to the 1985 census of Tampa Florida. Journal of the
        American Statistical Association. 84 (406): 414-20.

    The Jaro distance between two strings is the minimum number of
    single-character transpositions required to change one string into
    the other. The Jaro similarity formula from
    https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance :

        ``jaro_sim = 0 if m = 0 else 1/3 * (m/|s_1| + m/|s_2| + (m-t)/m)``

    where
        - `|s_i|` is the length of string `s_i`
        - `m` is the number of matching characters
        - `t` is half the number of possible transpositions.
    """
    # First, store the length of the strings
    # because they will be re-used several times.
    len_s1, len_s2 = len(s1), len(s2)

    # The upper bound of the distance for being a matched character.
    match_bound = max(len_s1, len_s2) // 2 - 1

    # Initialize the counts for matches and transpositions.
    matches = 0  # no. of matched characters in s1 and s2
    transpositions = 0  # no. of transpositions between s1 and s2
    flagged_1 = []  # positions in s1 which are matches to some character in s2
    flagged_2 = []  # positions in s2 which are matches to some character in s1

    # Iterate through sequences, check for matches and compute transpositions.
    for i in range(len_s1):  # Iterate through each character.
        upperbound = min(i + match_bound, len_s2 - 1)
        lowerbound = max(0, i - match_bound)
        for j in range(lowerbound, upperbound + 1):
            if s1[i] == s2[j] and j not in flagged_2:
                matches += 1
                flagged_1.append(i)
                flagged_2.append(j)
                break
    flagged_2.sort()
    for i, j in zip(flagged_1, flagged_2):
        if s1[i] != s2[j]:
            transpositions += 1

    if matches == 0:
        return 0
    else:
        return (
            1
            / 3
            * (
                matches / len_s1
                + matches / len_s2
                + (matches - transpositions // 2) / matches
            )
        )


def jaro_winkler_similarity(s1, s2, p=0.1, max_l=4):
    """
    The Jaro Winkler distance is an extension of the Jaro similarity in:

        William E. Winkler. 1990. String Comparator Metrics and Enhanced
        Decision Rules in the Fellegi-Sunter Model of Record Linkage.
        Proceedings of the Section on Survey Research Methods.
        American Statistical Association: 354-359.

    such that:

        jaro_winkler_sim = jaro_sim + ( l * p * (1 - jaro_sim) )

    where,

    - jaro_sim is the output from the Jaro Similarity,
      see jaro_similarity()
    - l is the length of the common prefix at the start of the strings
        - this implementation provides an upperbound for the l value
          to keep the prefixes. A common value of this upperbound is 4.
    - p is the constant scaling factor to overweigh common prefixes.
      The Jaro-Winkler similarity will fall within the [0, 1] bound,
      given that max(p)<=0.25 , default is p=0.1 in Winkler (1990)

    Test using outputs from https://www.census.gov/srd/papers/pdf/rr93-8.pdf
    from "Table 5 Comparison of String Comparators Rescaled between 0 and 1"

    >>> winkler_examples = [("billy", "billy"), ("billy", "bill"), ("billy", "blily"),
    ... ("massie", "massey"), ("yvette", "yevett"), ("billy", "bolly"), ("dwayne", "duane"),
    ... ("dixon", "dickson"), ("billy", "susan")]

    >>> winkler_scores = [1.000, 0.967, 0.947, 0.944, 0.911, 0.893, 0.858, 0.853, 0.000]
    >>> jaro_scores = [1.000, 0.933, 0.933, 0.889, 0.889, 0.867, 0.822, 0.790, 0.000]

    One way to match the values on the Winkler's paper is to provide a different
    p scaling factor for different pairs of strings, e.g.

    >>> p_factors = [0.1, 0.125, 0.20, 0.125, 0.20, 0.20, 0.20, 0.15, 0.1]

    >>> for (s1, s2), jscore, wscore, p in zip(winkler_examples, jaro_scores, winkler_scores, p_factors):
    ...     assert round(jaro_similarity(s1, s2), 3) == jscore
    ...     assert round(jaro_winkler_similarity(s1, s2, p=p), 3) == wscore


    Test using outputs from https://www.census.gov/srd/papers/pdf/rr94-5.pdf from
    "Table 2.1. Comparison of String Comparators Using Last Names, First Names, and Street Names"

    >>> winkler_examples = [('SHACKLEFORD', 'SHACKELFORD'), ('DUNNINGHAM', 'CUNNIGHAM'),
    ... ('NICHLESON', 'NICHULSON'), ('JONES', 'JOHNSON'), ('MASSEY', 'MASSIE'),
    ... ('ABROMS', 'ABRAMS'), ('HARDIN', 'MARTINEZ'), ('ITMAN', 'SMITH'),
    ... ('JERALDINE', 'GERALDINE'), ('MARHTA', 'MARTHA'), ('MICHELLE', 'MICHAEL'),
    ... ('JULIES', 'JULIUS'), ('TANYA', 'TONYA'), ('DWAYNE', 'DUANE'), ('SEAN', 'SUSAN'),
    ... ('JON', 'JOHN'), ('JON', 'JAN'), ('BROOKHAVEN', 'BRROKHAVEN'),
    ... ('BROOK HALLOW', 'BROOK HLLW'), ('DECATUR', 'DECATIR'), ('FITZRUREITER', 'FITZENREITER'),
    ... ('HIGBEE', 'HIGHEE'), ('HIGBEE', 'HIGVEE'), ('LACURA', 'LOCURA'), ('IOWA', 'IONA'), ('1ST', 'IST')]

    >>> jaro_scores = [0.970, 0.896, 0.926, 0.790, 0.889, 0.889, 0.722, 0.467, 0.926,
    ... 0.944, 0.869, 0.889, 0.867, 0.822, 0.783, 0.917, 0.000, 0.933, 0.944, 0.905,
    ... 0.856, 0.889, 0.889, 0.889, 0.833, 0.000]

    >>> winkler_scores = [0.982, 0.896, 0.956, 0.832, 0.944, 0.922, 0.722, 0.467, 0.926,
    ... 0.961, 0.921, 0.933, 0.880, 0.858, 0.805, 0.933, 0.000, 0.947, 0.967, 0.943,
    ... 0.913, 0.922, 0.922, 0.900, 0.867, 0.000]

    One way to match the values on the Winkler's paper is to provide a different
    p scaling factor for different pairs of strings, e.g.

    >>> p_factors = [0.1, 0.1, 0.1, 0.1, 0.125, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.20,
    ... 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]


    >>> for (s1, s2), jscore, wscore, p in zip(winkler_examples, jaro_scores, winkler_scores, p_factors):
    ...     if (s1, s2) in [('JON', 'JAN'), ('1ST', 'IST')]:
    ...         continue  # Skip bad examples from the paper.
    ...     assert round(jaro_similarity(s1, s2), 3) == jscore
    ...     assert round(jaro_winkler_similarity(s1, s2, p=p), 3) == wscore


    This test-case proves that the output of Jaro-Winkler similarity depends on
    the product l * p and not on the product max_l * p. Here the product max_l * p > 1,
    however the product l * p <= 1.

    >>> round(jaro_winkler_similarity('TANYA', 'TONYA', p=0.1, max_l=100), 3)
    0.88
    """
    # To ensure that the output of the Jaro-Winkler similarity
    # falls between [0,1], the product l * p needs to
    # also fall between [0,1].
    if not 0 <= max_l * p <= 1:
        warnings.warn(
            str(
                "The product `max_l * p` might not fall between [0,1]."
                "Jaro-Winkler similarity might not be between 0 and 1."
            )
        )

    # Compute the Jaro similarity
    jaro_sim = jaro_similarity(s1, s2)

    # Initialize the upper bound for the no. of prefixes.
    # if user did not pre-define the upperbound,
    # use shorter length between s1 and s2

    # Compute the prefix matches.
    l = 0
    # zip() will automatically loop until the end of shorter string.
    for s1_i, s2_i in zip(s1, s2):
        if s1_i == s2_i:
            l += 1
        else:
            break
        if l == max_l:
            break
    # Return the similarity value as described in docstring.
    return jaro_sim + (l * p * (1 - jaro_sim))


def demo():
    string_distance_examples = [
        ("rain", "shine"),
        ("abcdef", "acbdef"),
        ("language", "lnaguaeg"),
        ("language", "lnaugage"),
        ("language", "lngauage"),
    ]
    for s1, s2 in string_distance_examples:
        print(f"Edit distance btwn '{s1}' and '{s2}':", edit_distance(s1, s2))
        print(
            f"Edit dist with transpositions btwn '{s1}' and '{s2}':",
            edit_distance(s1, s2, transpositions=True),
        )
        print(f"Jaro similarity btwn '{s1}' and '{s2}':", jaro_similarity(s1, s2))
        print(
            f"Jaro-Winkler similarity btwn '{s1}' and '{s2}':",
            jaro_winkler_similarity(s1, s2),
        )
        print(
            f"Jaro-Winkler distance btwn '{s1}' and '{s2}':",
            1 - jaro_winkler_similarity(s1, s2),
        )
    s1 = {1, 2, 3, 4}
    s2 = {3, 4, 5}
    print("s1:", s1)
    print("s2:", s2)
    print("Binary distance:", binary_distance(s1, s2))
    print("Jaccard distance:", jaccard_distance(s1, s2))
    print("MASI distance:", masi_distance(s1, s2))


if __name__ == "__main__":
    demo()
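For orientation, here is a minimal usage sketch of the set and string metrics defined in distance.py above. The values in the comments are hand-computed from the formulas in the file (the Jaro-Winkler value comes from the rr94-5 table quoted in the docstring); the import path is the module itself.

from nltk.metrics.distance import (
    binary_distance,
    jaccard_distance,
    jaro_winkler_similarity,
    masi_distance,
)

s1, s2 = {1, 2, 3, 4}, {3, 4, 5}
print(binary_distance(s1, s2))       # 1.0 because the two labels differ
print(jaccard_distance(s1, s2))      # (5 - 2) / 5 = 0.6
print(masi_distance(s1, s2))         # 1 - (2/5) * 0.33 = 0.868
print(round(jaro_winkler_similarity("MARHTA", "MARTHA"), 3))  # 0.961 per the table above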
env-llmeval/lib/python3.10/site-packages/nltk/metrics/scores.py
ADDED
@@ -0,0 +1,228 @@
# Natural Language Toolkit: Evaluation
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Edward Loper <[email protected]>
#         Steven Bird <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

import operator
from functools import reduce
from math import fabs
from random import shuffle

try:
    from scipy.stats.stats import betai
except ImportError:
    betai = None

from nltk.util import LazyConcatenation, LazyMap


def accuracy(reference, test):
    """
    Given a list of reference values and a corresponding list of test
    values, return the fraction of corresponding values that are
    equal. In particular, return the fraction of indices
    ``0<=i<len(test)`` such that ``test[i] == reference[i]``.

    :type reference: list
    :param reference: An ordered list of reference values.
    :type test: list
    :param test: A list of values to compare against the corresponding
        reference values.
    :raise ValueError: If ``reference`` and ``test`` do not have the
        same length.
    """
    if len(reference) != len(test):
        raise ValueError("Lists must have the same length.")
    return sum(x == y for x, y in zip(reference, test)) / len(test)


def precision(reference, test):
    """
    Given a set of reference values and a set of test values, return
    the fraction of test values that appear in the reference set.
    In particular, return card(``reference`` intersection ``test``)/card(``test``).
    If ``test`` is empty, then return None.

    :type reference: set
    :param reference: A set of reference values.
    :type test: set
    :param test: A set of values to compare against the reference set.
    :rtype: float or None
    """
    if not hasattr(reference, "intersection") or not hasattr(test, "intersection"):
        raise TypeError("reference and test should be sets")

    if len(test) == 0:
        return None
    else:
        return len(reference.intersection(test)) / len(test)


def recall(reference, test):
    """
    Given a set of reference values and a set of test values, return
    the fraction of reference values that appear in the test set.
    In particular, return card(``reference`` intersection ``test``)/card(``reference``).
    If ``reference`` is empty, then return None.

    :type reference: set
    :param reference: A set of reference values.
    :type test: set
    :param test: A set of values to compare against the reference set.
    :rtype: float or None
    """
    if not hasattr(reference, "intersection") or not hasattr(test, "intersection"):
        raise TypeError("reference and test should be sets")

    if len(reference) == 0:
        return None
    else:
        return len(reference.intersection(test)) / len(reference)


def f_measure(reference, test, alpha=0.5):
    """
    Given a set of reference values and a set of test values, return
    the f-measure of the test values, when compared against the
    reference values. The f-measure is the harmonic mean of the
    ``precision`` and ``recall``, weighted by ``alpha``. In particular,
    given the precision *p* and recall *r* defined by:

    - *p* = card(``reference`` intersection ``test``)/card(``test``)
    - *r* = card(``reference`` intersection ``test``)/card(``reference``)

    The f-measure is:

    - *1/(alpha/p + (1-alpha)/r)*

    If either ``reference`` or ``test`` is empty, then ``f_measure``
    returns None.

    :type reference: set
    :param reference: A set of reference values.
    :type test: set
    :param test: A set of values to compare against the reference set.
    :rtype: float or None
    """
    p = precision(reference, test)
    r = recall(reference, test)
    if p is None or r is None:
        return None
    if p == 0 or r == 0:
        return 0
    return 1.0 / (alpha / p + (1 - alpha) / r)


def log_likelihood(reference, test):
    """
    Given a list of reference values and a corresponding list of test
    probability distributions, return the average log likelihood of
    the reference values, given the probability distributions.

    :param reference: A list of reference values
    :type reference: list
    :param test: A list of probability distributions over values to
        compare against the corresponding reference values.
    :type test: list(ProbDistI)
    """
    if len(reference) != len(test):
        raise ValueError("Lists must have the same length.")

    # Return the average value of dist.logprob(val).
    total_likelihood = sum(dist.logprob(val) for (val, dist) in zip(reference, test))
    return total_likelihood / len(reference)


def approxrand(a, b, **kwargs):
    """
    Returns an approximate significance level between two lists of
    independently generated test values.

    Approximate randomization calculates significance by randomly drawing
    from a sample of the possible permutations. At the limit of the number
    of possible permutations, the significance level is exact. The
    approximate significance level is the sample mean number of times the
    statistic of the permuted lists varies from the actual statistic of
    the unpermuted argument lists.

    :return: a tuple containing an approximate significance level, the count
        of the number of times the pseudo-statistic varied from the
        actual statistic, and the number of shuffles
    :rtype: tuple
    :param a: a list of test values
    :type a: list
    :param b: another list of independently generated test values
    :type b: list
    """
    shuffles = kwargs.get("shuffles", 999)
    # there's no point in trying to shuffle beyond all possible permutations
    shuffles = min(shuffles, reduce(operator.mul, range(1, len(a) + len(b) + 1)))
    stat = kwargs.get("statistic", lambda lst: sum(lst) / len(lst))
    verbose = kwargs.get("verbose", False)

    if verbose:
        print("shuffles: %d" % shuffles)

    actual_stat = fabs(stat(a) - stat(b))

    if verbose:
        print("actual statistic: %f" % actual_stat)
        print("-" * 60)

    c = 1e-100
    lst = LazyConcatenation([a, b])
    indices = list(range(len(a) + len(b)))

    for i in range(shuffles):
        if verbose and i % 10 == 0:
            print("shuffle: %d" % i)

        shuffle(indices)

        pseudo_stat_a = stat(LazyMap(lambda i: lst[i], indices[: len(a)]))
        pseudo_stat_b = stat(LazyMap(lambda i: lst[i], indices[len(a) :]))
        pseudo_stat = fabs(pseudo_stat_a - pseudo_stat_b)

        if pseudo_stat >= actual_stat:
            c += 1

        if verbose and i % 10 == 0:
            print("pseudo-statistic: %f" % pseudo_stat)
            print("significance: %f" % ((c + 1) / (i + 1)))
            print("-" * 60)

    significance = (c + 1) / (shuffles + 1)

    if verbose:
        print("significance: %f" % significance)
        if betai:
            for phi in [0.01, 0.05, 0.10, 0.15, 0.25, 0.50]:
                print(f"prob(phi<={phi:f}): {betai(c, shuffles, phi):f}")

    return (significance, c, shuffles)


def demo():
    print("-" * 75)
    reference = "DET NN VB DET JJ NN NN IN DET NN".split()
    test = "DET VB VB DET NN NN NN IN DET NN".split()
    print("Reference =", reference)
    print("Test =", test)
    print("Accuracy:", accuracy(reference, test))

    print("-" * 75)
    reference_set = set(reference)
    test_set = set(test)
    print("Reference =", reference_set)
    print("Test = ", test_set)
    print("Precision:", precision(reference_set, test_set))
    print("   Recall:", recall(reference_set, test_set))
    print("F-Measure:", f_measure(reference_set, test_set))
    print("-" * 75)


if __name__ == "__main__":
    demo()
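A small, self-contained sketch of how the scoring helpers in scores.py are typically called; the tag sequences mirror the module's own demo(), and the commented values follow directly from the definitions above.

from nltk.metrics.scores import accuracy, f_measure, precision, recall

reference = "DET NN VB DET JJ NN NN IN DET NN".split()
test = "DET VB VB DET NN NN NN IN DET NN".split()
print(accuracy(reference, test))      # 0.8: eight of the ten tags match position-wise

ref_set, test_set = set(reference), set(test)
print(precision(ref_set, test_set))   # |ref & test| / |test|
print(recall(ref_set, test_set))      # |ref & test| / |ref|
print(f_measure(ref_set, test_set))   # harmonic mean of the two, alpha=0.5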
env-llmeval/lib/python3.10/site-packages/nltk/metrics/spearman.py
ADDED
@@ -0,0 +1,68 @@
# Natural Language Toolkit: Spearman Rank Correlation
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Joel Nothman <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
Tools for comparing ranked lists.
"""


def _rank_dists(ranks1, ranks2):
    """Finds the difference between the values in ranks1 and ranks2 for keys
    present in both dicts. If the arguments are not dicts, they are converted
    from (key, rank) sequences.
    """
    ranks1 = dict(ranks1)
    ranks2 = dict(ranks2)
    for k in ranks1:
        try:
            yield k, ranks1[k] - ranks2[k]
        except KeyError:
            pass


def spearman_correlation(ranks1, ranks2):
    """Returns the Spearman correlation coefficient for two rankings, which
    should be dicts or sequences of (key, rank). The coefficient ranges from
    -1.0 (ranks are opposite) to 1.0 (ranks are identical), and is only
    calculated for keys in both rankings (for meaningful results, remove keys
    present in only one list before ranking)."""
    n = 0
    res = 0
    for k, d in _rank_dists(ranks1, ranks2):
        res += d * d
        n += 1
    try:
        return 1 - (6 * res / (n * (n * n - 1)))
    except ZeroDivisionError:
        # Result is undefined if only one item is ranked
        return 0.0


def ranks_from_sequence(seq):
    """Given a sequence, yields each element with an increasing rank, suitable
    for use as an argument to ``spearman_correlation``.
    """
    return ((k, i) for i, k in enumerate(seq))


def ranks_from_scores(scores, rank_gap=1e-15):
    """Given a sequence of (key, score) tuples, yields each key with an
    increasing rank, tying with previous key's rank if the difference between
    their scores is less than rank_gap. Suitable for use as an argument to
    ``spearman_correlation``.
    """
    prev_score = None
    rank = 0
    for i, (key, score) in enumerate(scores):
        try:
            if abs(score - prev_score) > rank_gap:
                rank = i
        except TypeError:
            pass

        yield key, rank
        prev_score = score
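A brief usage sketch for spearman.py: with four items and one adjacent swap, the formula 1 - 6*sum(d^2)/(n*(n^2-1)) gives 1 - 12/60 = 0.8, which is what the commented line below reflects. The word lists are illustrative only.

from nltk.metrics.spearman import ranks_from_sequence, spearman_correlation

ranking_a = ranks_from_sequence(["cat", "dog", "fish", "bird"])
ranking_b = ranks_from_sequence(["cat", "fish", "dog", "bird"])
# Only "dog" and "fish" swap places, so the correlation stays high:
print(spearman_correlation(ranking_a, ranking_b))  # 0.8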
env-llmeval/lib/python3.10/site-packages/nltk/misc/__init__.py
ADDED
@@ -0,0 +1,11 @@
# Natural Language Toolkit: Miscellaneous modules
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Steven Bird <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

from nltk.misc.babelfish import babelize_shell
from nltk.misc.chomsky import generate_chomsky
from nltk.misc.minimalset import MinimalSet
from nltk.misc.wordfinder import word_finder
env-llmeval/lib/python3.10/site-packages/nltk/misc/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (401 Bytes)
env-llmeval/lib/python3.10/site-packages/nltk/misc/__pycache__/babelfish.cpython-310.pyc
ADDED
Binary file (616 Bytes)
env-llmeval/lib/python3.10/site-packages/nltk/misc/__pycache__/chomsky.cpython-310.pyc
ADDED
Binary file (5.11 kB)
env-llmeval/lib/python3.10/site-packages/nltk/misc/__pycache__/minimalset.cpython-310.pyc
ADDED
Binary file (2.97 kB)
env-llmeval/lib/python3.10/site-packages/nltk/misc/__pycache__/sort.cpython-310.pyc
ADDED
Binary file (3.48 kB)
env-llmeval/lib/python3.10/site-packages/nltk/misc/__pycache__/wordfinder.cpython-310.pyc
ADDED
Binary file (4.06 kB)
env-llmeval/lib/python3.10/site-packages/nltk/misc/babelfish.py
ADDED
@@ -0,0 +1,10 @@
"""
This module previously provided an interface to Babelfish online
translation service; this service is no longer available; this
module is kept in NLTK source code in order to provide better error
messages for people following the NLTK Book 2.0.
"""


def babelize_shell():
    print("Babelfish online translation service is no longer available.")
env-llmeval/lib/python3.10/site-packages/nltk/misc/chomsky.py
ADDED
@@ -0,0 +1,134 @@
# Chomsky random text generator, version 1.1, Raymond Hettinger, 2005/09/13
# https://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440546

"""
CHOMSKY is an aid to writing linguistic papers in the style
of the great master. It is based on selected phrases taken
from actual books and articles written by Noam Chomsky.
Upon request, it assembles the phrases in the elegant
stylistic patterns that Chomsky is noted for.
To generate n sentences of linguistic wisdom, type

    (CHOMSKY n)  -- for example
    (CHOMSKY 5) generates half a screen of linguistic truth.
"""

leadins = """To characterize a linguistic level L,
On the other hand,
This suggests that
It appears that
Furthermore,
We will bring evidence in favor of the following thesis:
To provide a constituent structure for T(Z,K),
From C1, it follows that
For any transformation which is sufficiently diversified in \
application to be of any interest,
Analogously,
Clearly,
Note that
Of course,
Suppose, for instance, that
Thus
With this clarification,
Conversely,
We have already seen that
By combining adjunctions and certain deformations,
I suggested that these results would follow from the assumption that
If the position of the trace in (99c) were only relatively \
inaccessible to movement,
However, this assumption is not correct, since
Comparing these examples with their parasitic gap counterparts in \
(96) and (97), we see that
In the discussion of resumptive pronouns following (81),
So far,
Nevertheless,
For one thing,
Summarizing, then, we assume that
A consequence of the approach just outlined is that
Presumably,
On our assumptions,
It may be, then, that
It must be emphasized, once again, that
Let us continue to suppose that
Notice, incidentally, that """
# List of LEADINs to buy time.

subjects = """ the notion of level of grammaticalness
a case of semigrammaticalness of a different sort
most of the methodological work in modern linguistics
a subset of English sentences interesting on quite independent grounds
the natural general principle that will subsume this case
an important property of these three types of EC
any associated supporting element
the appearance of parasitic gaps in domains relatively inaccessible \
to ordinary extraction
the speaker-hearer's linguistic intuition
the descriptive power of the base component
the earlier discussion of deviance
this analysis of a formative as a pair of sets of features
this selectionally introduced contextual feature
a descriptively adequate grammar
the fundamental error of regarding functional notions as categorial
relational information
the systematic use of complex symbols
the theory of syntactic features developed earlier"""
# List of SUBJECTs chosen for maximum professorial macho.

verbs = """can be defined in such a way as to impose
delimits
suffices to account for
cannot be arbitrary in
is not subject to
does not readily tolerate
raises serious doubts about
is not quite equivalent to
does not affect the structure of
may remedy and, at the same time, eliminate
is not to be considered in determining
is to be regarded as
is unspecified with respect to
is, apparently, determined by
is necessary to impose an interpretation on
appears to correlate rather closely with
is rather different from"""
# List of VERBs chosen for autorecursive obfuscation.

objects = """ problems of phonemic and morphological analysis.
a corpus of utterance tokens upon which conformity has been defined \
by the paired utterance test.
the traditional practice of grammarians.
the levels of acceptability from fairly high (e.g. (99a)) to virtual \
gibberish (e.g. (98d)).
a stipulation to place the constructions into these various categories.
a descriptive fact.
a parasitic gap construction.
the extended c-command discussed in connection with (34).
the ultimate standard that determines the accuracy of any proposed grammar.
the system of base rules exclusive of the lexicon.
irrelevant intervening contexts in selectional rules.
nondistinctness in the sense of distinctive feature theory.
a general convention regarding the forms of the grammar.
an abstract underlying order.
an important distinction in language use.
the requirement that branching is not tolerated within the dominance \
scope of a complex symbol.
the strong generative capacity of the theory."""
# List of OBJECTs selected for profound sententiousness.

import random
import textwrap
from itertools import chain, islice


def generate_chomsky(times=5, line_length=72):
    parts = []
    for part in (leadins, subjects, verbs, objects):
        phraselist = list(map(str.strip, part.splitlines()))
        random.shuffle(phraselist)
        parts.append(phraselist)
    output = chain.from_iterable(islice(zip(*parts), 0, times))
    print(textwrap.fill(" ".join(output), line_length))


if __name__ == "__main__":
    generate_chomsky()
env-llmeval/lib/python3.10/site-packages/nltk/misc/minimalset.py
ADDED
@@ -0,0 +1,85 @@
# Natural Language Toolkit: Minimal Sets
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Steven Bird <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

from collections import defaultdict


class MinimalSet:
    """
    Find contexts where more than one possible target value can
    appear. E.g. if targets are word-initial letters, and contexts
    are the remainders of words, then we would like to find cases like
    "fat" vs "cat", and "training" vs "draining". If targets are
    parts-of-speech and contexts are words, then we would like to find
    cases like wind (noun) 'air in rapid motion', vs wind (verb)
    'coil, wrap'.
    """

    def __init__(self, parameters=None):
        """
        Create a new minimal set.

        :param parameters: The (context, target, display) tuples for the item
        :type parameters: list(tuple(str, str, str))
        """
        self._targets = set()  # the contrastive information
        self._contexts = set()  # what we are controlling for
        self._seen = defaultdict(set)  # to record what we have seen
        self._displays = {}  # what we will display

        if parameters:
            for context, target, display in parameters:
                self.add(context, target, display)

    def add(self, context, target, display):
        """
        Add a new item to the minimal set, having the specified
        context, target, and display form.

        :param context: The context in which the item of interest appears
        :type context: str
        :param target: The item of interest
        :type target: str
        :param display: The information to be reported for each item
        :type display: str
        """
        # Store the set of targets that occurred in this context
        self._seen[context].add(target)

        # Keep track of which contexts and targets we have seen
        self._contexts.add(context)
        self._targets.add(target)

        # For a given context and target, store the display form
        self._displays[(context, target)] = display

    def contexts(self, minimum=2):
        """
        Determine which contexts occurred with enough distinct targets.

        :param minimum: the minimum number of distinct target forms
        :type minimum: int
        :rtype: list
        """
        return [c for c in self._contexts if len(self._seen[c]) >= minimum]

    def display(self, context, target, default=""):
        if (context, target) in self._displays:
            return self._displays[(context, target)]
        else:
            return default

    def display_all(self, context):
        result = []
        for target in self._targets:
            x = self.display(context, target)
            if x:
                result.append(x)
        return result

    def targets(self):
        return self._targets
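A short sketch of the MinimalSet workflow described in the class docstring above, using word-initial letters as targets and the word remainders as contexts; the word list is illustrative only.

from nltk.misc.minimalset import MinimalSet

# (context, target, display) = (word minus its first letter, first letter, whole word)
words = ["cat", "fat", "training", "draining", "dog"]
ms = MinimalSet((w[1:], w[0], w) for w in words)
for context in ms.contexts(minimum=2):
    print(context, "->", ms.display_all(context))
# e.g. "at" -> ['cat', 'fat'] and "raining" -> ['training', 'draining']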
env-llmeval/lib/python3.10/site-packages/nltk/misc/sort.py
ADDED
@@ -0,0 +1,176 @@
# Natural Language Toolkit: List Sorting
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Steven Bird <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
This module provides a variety of list sorting algorithms, to
illustrate the many different algorithms (recipes) for solving a
problem, and how to analyze algorithms experimentally.
"""
# These algorithms are taken from:
# Levitin (2004) The Design and Analysis of Algorithms

##################################################################
# Selection Sort
##################################################################


def selection(a):
    """
    Selection Sort: scan the list to find its smallest element, then
    swap it with the first element. The remainder of the list is one
    element smaller; apply the same method to this list, and so on.
    """
    count = 0

    for i in range(len(a) - 1):
        min = i

        for j in range(i + 1, len(a)):
            if a[j] < a[min]:
                min = j

            count += 1

        a[min], a[i] = a[i], a[min]

    return count


##################################################################
# Bubble Sort
##################################################################


def bubble(a):
    """
    Bubble Sort: compare adjacent elements of the list left-to-right,
    and swap them if they are out of order. After one pass through
    the list swapping adjacent items, the largest item will be in
    the rightmost position. The remainder is one element smaller;
    apply the same method to this list, and so on.
    """
    count = 0
    for i in range(len(a) - 1):
        for j in range(len(a) - i - 1):
            if a[j + 1] < a[j]:
                a[j], a[j + 1] = a[j + 1], a[j]
                count += 1
    return count


##################################################################
# Merge Sort
##################################################################


def _merge_lists(b, c):
    count = 0
    i = j = 0
    a = []
    while i < len(b) and j < len(c):
        count += 1
        if b[i] <= c[j]:
            a.append(b[i])
            i += 1
        else:
            a.append(c[j])
            j += 1
    if i == len(b):
        a += c[j:]
    else:
        a += b[i:]
    return a, count


def merge(a):
    """
    Merge Sort: split the list in half, and sort each half, then
    combine the sorted halves.
    """
    count = 0
    if len(a) > 1:
        midpoint = len(a) // 2
        b = a[:midpoint]
        c = a[midpoint:]
        count_b = merge(b)
        count_c = merge(c)
        result, count_a = _merge_lists(b, c)
        a[:] = result  # copy the result back into a.
        count = count_a + count_b + count_c
    return count


##################################################################
# Quick Sort
##################################################################


def _partition(a, l, r):
    p = a[l]
    i = l
    j = r + 1
    count = 0
    while True:
        while i < r:
            i += 1
            if a[i] >= p:
                break
        while j > l:
            j -= 1
            if j < l or a[j] <= p:
                break
        a[i], a[j] = a[j], a[i]  # swap
        count += 1
        if i >= j:
            break
    a[i], a[j] = a[j], a[i]  # undo last swap
    a[l], a[j] = a[j], a[l]
    return j, count


def _quick(a, l, r):
    count = 0
    if l < r:
        s, count = _partition(a, l, r)
        count += _quick(a, l, s - 1)
        count += _quick(a, s + 1, r)
    return count


def quick(a):
    return _quick(a, 0, len(a) - 1)


##################################################################
# Demonstration
##################################################################


def demo():
    from random import shuffle

    for size in (10, 20, 50, 100, 200, 500, 1000):
        a = list(range(size))

        # various sort methods
        shuffle(a)
        count_selection = selection(a)
        shuffle(a)
        count_bubble = bubble(a)
        shuffle(a)
        count_merge = merge(a)
        shuffle(a)
        count_quick = quick(a)

        print(
            ("size=%5d: selection=%8d, bubble=%8d, " "merge=%6d, quick=%6d")
            % (size, count_selection, count_bubble, count_merge, count_quick)
        )


if __name__ == "__main__":
    demo()
env-llmeval/lib/python3.10/site-packages/nltk/misc/wordfinder.py
ADDED
@@ -0,0 +1,139 @@
# Natural Language Toolkit: Word Finder
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Steven Bird <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

# Simplified from PHP version by Robert Klein <[email protected]>
# http://fswordfinder.sourceforge.net/

import random


# reverse a word with probability 0.5
def revword(word):
    if random.randint(1, 2) == 1:
        return word[::-1]
    return word


# try to insert word at position x,y; direction encoded in xf,yf
def step(word, x, xf, y, yf, grid):
    for i in range(len(word)):
        if grid[xf(i)][yf(i)] != "" and grid[xf(i)][yf(i)] != word[i]:
            return False
    for i in range(len(word)):
        grid[xf(i)][yf(i)] = word[i]
    return True


# try to insert word at position x,y, in direction dir
def check(word, dir, x, y, grid, rows, cols):
    if dir == 1:
        if x - len(word) < 0 or y - len(word) < 0:
            return False
        return step(word, x, lambda i: x - i, y, lambda i: y - i, grid)
    elif dir == 2:
        if x - len(word) < 0:
            return False
        return step(word, x, lambda i: x - i, y, lambda i: y, grid)
    elif dir == 3:
        if x - len(word) < 0 or y + (len(word) - 1) >= cols:
            return False
        return step(word, x, lambda i: x - i, y, lambda i: y + i, grid)
    elif dir == 4:
        if y - len(word) < 0:
            return False
        return step(word, x, lambda i: x, y, lambda i: y - i, grid)


def wordfinder(words, rows=20, cols=20, attempts=50, alph="ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """
    Attempt to arrange words into a letter-grid with the specified
    number of rows and columns. Try each word in several positions
    and directions, until it can be fitted into the grid, or the
    maximum number of allowable attempts is exceeded. Returns a tuple
    consisting of the grid and the words that were successfully
    placed.

    :param words: the list of words to be put into the grid
    :type words: list
    :param rows: the number of rows in the grid
    :type rows: int
    :param cols: the number of columns in the grid
    :type cols: int
    :param attempts: the number of times to attempt placing a word
    :type attempts: int
    :param alph: the alphabet, to be used for filling blank cells
    :type alph: list
    :rtype: tuple
    """

    # place longer words first
    words = sorted(words, key=len, reverse=True)

    grid = []  # the letter grid
    used = []  # the words we used

    # initialize the grid
    for i in range(rows):
        grid.append([""] * cols)

    # try to place each word
    for word in words:
        word = word.strip().upper()  # normalize
        save = word  # keep a record of the word
        word = revword(word)
        for attempt in range(attempts):
            r = random.randint(0, len(word))
            dir = random.choice([1, 2, 3, 4])
            x = random.randint(0, rows)
            y = random.randint(0, cols)
            if dir == 1:
                x += r
                y += r
            elif dir == 2:
                x += r
            elif dir == 3:
                x += r
                y -= r
            elif dir == 4:
                y += r
            if 0 <= x < rows and 0 <= y < cols:
                if check(word, dir, x, y, grid, rows, cols):
                    # used.append((save, dir, x, y, word))
                    used.append(save)
                    break

    # Fill up the remaining spaces
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] == "":
                grid[i][j] = random.choice(alph)

    return grid, used


def word_finder():
    from nltk.corpus import words

    wordlist = words.words()
    random.shuffle(wordlist)
    wordlist = wordlist[:200]
    wordlist = [w for w in wordlist if 3 <= len(w) <= 12]
    grid, used = wordfinder(wordlist)

    print("Word Finder\n")
    for i in range(len(grid)):
        for j in range(len(grid[i])):
            print(grid[i][j], end=" ")
        print()
    print()

    for i in range(len(used)):
        print("%d:" % (i + 1), used[i])


if __name__ == "__main__":
    word_finder()
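A minimal sketch of calling wordfinder() directly with a hand-picked word list instead of the corpus-driven word_finder() demo; the grid contents are random by design, so only the shape of the output is predictable.

from nltk.misc.wordfinder import wordfinder

grid, used = wordfinder(["LANGUAGE", "GRAMMAR", "CORPUS", "TOKEN"], rows=10, cols=10)
for row in grid:
    print(" ".join(row))
print("Placed:", used)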
env-llmeval/lib/python3.10/site-packages/nltk/stem/__init__.py
ADDED
@@ -0,0 +1,34 @@
# Natural Language Toolkit: Stemmers
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Trevor Cohn <[email protected]>
#         Edward Loper <[email protected]>
#         Steven Bird <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
NLTK Stemmers

Interfaces used to remove morphological affixes from words, leaving
only the word stem. Stemming algorithms aim to remove those affixes
required for eg. grammatical role, tense, derivational morphology
leaving only the stem of the word. This is a difficult problem due to
irregular words (eg. common verbs in English), complicated
morphological rules, and part-of-speech and sense ambiguities
(eg. ``ceil-`` is not the stem of ``ceiling``).

StemmerI defines a standard interface for stemmers.
"""

from nltk.stem.api import StemmerI
from nltk.stem.arlstem import ARLSTem
from nltk.stem.arlstem2 import ARLSTem2
from nltk.stem.cistem import Cistem
from nltk.stem.isri import ISRIStemmer
from nltk.stem.lancaster import LancasterStemmer
from nltk.stem.porter import PorterStemmer
from nltk.stem.regexp import RegexpStemmer
from nltk.stem.rslp import RSLPStemmer
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.28 kB)
env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/api.cpython-310.pyc
ADDED
Binary file (819 Bytes)
env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem.cpython-310.pyc
ADDED
Binary file (8.29 kB)
env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem2.cpython-310.pyc
ADDED
Binary file (9.97 kB)
env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/cistem.cpython-310.pyc
ADDED
Binary file (6.33 kB)
env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/isri.cpython-310.pyc
ADDED
Binary file (9.23 kB)
env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/lancaster.cpython-310.pyc
ADDED
Binary file (6.4 kB)
env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/porter.cpython-310.pyc
ADDED
Binary file (21.6 kB)
env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/regexp.cpython-310.pyc
ADDED
Binary file (1.73 kB)
env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/rslp.cpython-310.pyc
ADDED
Binary file (3.08 kB)
env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/snowball.cpython-310.pyc
ADDED
Binary file (97.3 kB)
env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/util.cpython-310.pyc
ADDED
Binary file (679 Bytes)
env-llmeval/lib/python3.10/site-packages/nltk/stem/__pycache__/wordnet.cpython-310.pyc
ADDED
Binary file (1.85 kB)
env-llmeval/lib/python3.10/site-packages/nltk/stem/api.py
ADDED
@@ -0,0 +1,27 @@
# Natural Language Toolkit: Stemmer Interface
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Trevor Cohn <[email protected]>
#         Edward Loper <[email protected]>
#         Steven Bird <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

from abc import ABCMeta, abstractmethod


class StemmerI(metaclass=ABCMeta):
    """
    A processing interface for removing morphological affixes from
    words. This process is known as stemming.

    """

    @abstractmethod
    def stem(self, token):
        """
        Strip affixes from the token and return the stem.

        :param token: The token that should be stemmed.
        :type token: str
        """
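The StemmerI contract above is small: a concrete stemmer only has to implement stem(). A toy subclass sketch (purely illustrative, not an NLTK stemmer) shows what an implementation must provide.

from nltk.stem.api import StemmerI


class SuffixStripper(StemmerI):
    """Naive stemmer that strips a few common English suffixes."""

    def stem(self, token):
        for suffix in ("ing", "ed", "s"):
            # Only strip when enough of the word remains to be a plausible stem.
            if token.endswith(suffix) and len(token) > len(suffix) + 2:
                return token[: -len(suffix)]
        return token


print(SuffixStripper().stem("running"))  # "runn" - crude, for illustration only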
env-llmeval/lib/python3.10/site-packages/nltk/stem/isri.py
ADDED
@@ -0,0 +1,395 @@
#
# Natural Language Toolkit: The ISRI Arabic Stemmer
#
# Copyright (C) 2001-2023 NLTK Project
# Algorithm: Kazem Taghva, Rania Elkhoury, and Jeffrey Coombs (2005)
# Author: Hosam Algasaier <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
ISRI Arabic Stemmer

The algorithm for this stemmer is described in:

Taghva, K., Elkoury, R., and Coombs, J. 2005. Arabic Stemming without a root dictionary.
Information Science Research Institute. University of Nevada, Las Vegas, USA.

The Information Science Research Institute's (ISRI) Arabic stemmer shares many features
with the Khoja stemmer. However, the main difference is that the ISRI stemmer does not use a root
dictionary. Also, if a root is not found, the ISRI stemmer returns the normalized form, rather than
returning the original unmodified word.

Additional adjustments were made to improve the algorithm:

1- Adding 60 stop words.
2- Adding the pattern (تفاعيل) to the ISRI pattern set.
3- Step 2 in the original algorithm normalized all hamza. This step is discarded because it
increases the word ambiguities and changes the original root.

"""
import re

from nltk.stem.api import StemmerI


class ISRIStemmer(StemmerI):
    """
    ISRI Arabic stemmer based on algorithm: Arabic Stemming without a root dictionary.
    Information Science Research Institute. University of Nevada, Las Vegas, USA.

    A few minor modifications have been made to the ISRI basic algorithm.
    See the source code of this module for more information.

    isri.stem(token) returns the Arabic root for the given token.

    The ISRI Stemmer requires that all tokens have Unicode string types.
    If you use Python IDLE on Arabic Windows you have to decode text first
    using Arabic '1256' coding.
    """

    def __init__(self):
        # length three prefixes
        self.p3 = [
            "\u0643\u0627\u0644",
            "\u0628\u0627\u0644",
            "\u0648\u0644\u0644",
            "\u0648\u0627\u0644",
        ]

        # length two prefixes
        self.p2 = ["\u0627\u0644", "\u0644\u0644"]

        # length one prefixes
        self.p1 = [
            "\u0644",
            "\u0628",
            "\u0641",
            "\u0633",
            "\u0648",
            "\u064a",
            "\u062a",
            "\u0646",
            "\u0627",
        ]

        # length three suffixes
        self.s3 = [
            "\u062a\u0645\u0644",
            "\u0647\u0645\u0644",
            "\u062a\u0627\u0646",
            "\u062a\u064a\u0646",
            "\u0643\u0645\u0644",
        ]

        # length two suffixes
        self.s2 = [
            "\u0648\u0646",
            "\u0627\u062a",
            "\u0627\u0646",
            "\u064a\u0646",
            "\u062a\u0646",
            "\u0643\u0645",
            "\u0647\u0646",
            "\u0646\u0627",
            "\u064a\u0627",
            "\u0647\u0627",
            "\u062a\u0645",
            "\u0643\u0646",
            "\u0646\u064a",
            "\u0648\u0627",
            "\u0645\u0627",
            "\u0647\u0645",
        ]

        # length one suffixes
        self.s1 = ["\u0629", "\u0647", "\u064a", "\u0643", "\u062a", "\u0627", "\u0646"]

        # groups of length four patterns
        self.pr4 = {
            0: ["\u0645"],
            1: ["\u0627"],
            2: ["\u0627", "\u0648", "\u064A"],
            3: ["\u0629"],
        }

        # Groups of length five patterns and length three roots
        self.pr53 = {
            0: ["\u0627", "\u062a"],
            1: ["\u0627", "\u064a", "\u0648"],
            2: ["\u0627", "\u062a", "\u0645"],
            3: ["\u0645", "\u064a", "\u062a"],
            4: ["\u0645", "\u062a"],
            5: ["\u0627", "\u0648"],
            6: ["\u0627", "\u0645"],
        }

        self.re_short_vowels = re.compile(r"[\u064B-\u0652]")
        self.re_hamza = re.compile(r"[\u0621\u0624\u0626]")
        self.re_initial_hamza = re.compile(r"^[\u0622\u0623\u0625]")

        self.stop_words = [
            "\u064a\u0643\u0648\u0646",
            "\u0648\u0644\u064a\u0633",
            "\u0648\u0643\u0627\u0646",
            "\u0643\u0630\u0644\u0643",
            "\u0627\u0644\u062a\u064a",
            "\u0648\u0628\u064a\u0646",
            "\u0639\u0644\u064a\u0647\u0627",
            "\u0645\u0633\u0627\u0621",
            "\u0627\u0644\u0630\u064a",
            "\u0648\u0643\u0627\u0646\u062a",
            "\u0648\u0644\u0643\u0646",
            "\u0648\u0627\u0644\u062a\u064a",
            "\u062a\u0643\u0648\u0646",
            "\u0627\u0644\u064a\u0648\u0645",
            "\u0627\u0644\u0644\u0630\u064a\u0646",
            "\u0639\u0644\u064a\u0647",
            "\u0643\u0627\u0646\u062a",
            "\u0644\u0630\u0644\u0643",
            "\u0623\u0645\u0627\u0645",
            "\u0647\u0646\u0627\u0643",
            "\u0645\u0646\u0647\u0627",
            "\u0645\u0627\u0632\u0627\u0644",
            "\u0644\u0627\u0632\u0627\u0644",
            "\u0644\u0627\u064a\u0632\u0627\u0644",
            "\u0645\u0627\u064a\u0632\u0627\u0644",
            "\u0627\u0635\u0628\u062d",
            "\u0623\u0635\u0628\u062d",
            "\u0623\u0645\u0633\u0649",
            "\u0627\u0645\u0633\u0649",
            "\u0623\u0636\u062d\u0649",
            "\u0627\u0636\u062d\u0649",
            "\u0645\u0627\u0628\u0631\u062d",
            "\u0645\u0627\u0641\u062a\u0626",
            "\u0645\u0627\u0627\u0646\u0641\u0643",
            "\u0644\u0627\u0633\u064a\u0645\u0627",
            "\u0648\u0644\u0627\u064a\u0632\u0627\u0644",
            "\u0627\u0644\u062d\u0627\u0644\u064a",
            "\u0627\u0644\u064a\u0647\u0627",
            "\u0627\u0644\u0630\u064a\u0646",
            "\u0641\u0627\u0646\u0647",
            "\u0648\u0627\u0644\u0630\u064a",
            "\u0648\u0647\u0630\u0627",
            "\u0644\u0647\u0630\u0627",
            "\u0641\u0643\u0627\u0646",
            "\u0633\u062a\u0643\u0648\u0646",
            "\u0627\u0644\u064a\u0647",
            "\u064a\u0645\u0643\u0646",
            "\u0628\u0647\u0630\u0627",
            "\u0627\u0644\u0630\u0649",
        ]

    def stem(self, token):
        """
        Stemming a word token using the ISRI stemmer.
        """
        token = self.norm(
            token, 1
        )  # remove diacritics which represent Arabic short vowels
        if token in self.stop_words:
            return token  # exclude stop words from being processed
        token = self.pre32(
            token
        )  # remove length three and length two prefixes in this order
        token = self.suf32(
            token
        )  # remove length three and length two suffixes in this order
        token = self.waw(
            token
        )  # remove connective ‘و’ if it precedes a word beginning with ‘و’
        token = self.norm(token, 2)  # normalize initial hamza to bare alif
        # if 4 <= word length <= 7, then stem; otherwise, no stemming
        if len(token) == 4:  # length 4 word
            token = self.pro_w4(token)
        elif len(token) == 5:  # length 5 word
            token = self.pro_w53(token)
            token = self.end_w5(token)
        elif len(token) == 6:  # length 6 word
            token = self.pro_w6(token)
            token = self.end_w6(token)
        elif len(token) == 7:  # length 7 word
            token = self.suf1(token)
            if len(token) == 7:
                token = self.pre1(token)
            if len(token) == 6:
                token = self.pro_w6(token)
                token = self.end_w6(token)
        return token

    def norm(self, word, num=3):
        """
        normalization:
        num=1 normalize diacritics
        num=2 normalize initial hamza
        num=3 both 1&2
        """
        if num == 1:
            word = self.re_short_vowels.sub("", word)
        elif num == 2:
            word = self.re_initial_hamza.sub("\u0627", word)
        elif num == 3:
            word = self.re_short_vowels.sub("", word)
            word = self.re_initial_hamza.sub("\u0627", word)
        return word

    def pre32(self, word):
        """remove length three and length two prefixes in this order"""
        if len(word) >= 6:
            for pre3 in self.p3:
                if word.startswith(pre3):
                    return word[3:]
        if len(word) >= 5:
            for pre2 in self.p2:
                if word.startswith(pre2):
                    return word[2:]
        return word

    def suf32(self, word):
        """remove length three and length two suffixes in this order"""
        if len(word) >= 6:
|
251 |
+
for suf3 in self.s3:
|
252 |
+
if word.endswith(suf3):
|
253 |
+
return word[:-3]
|
254 |
+
if len(word) >= 5:
|
255 |
+
for suf2 in self.s2:
|
256 |
+
if word.endswith(suf2):
|
257 |
+
return word[:-2]
|
258 |
+
return word
|
259 |
+
|
260 |
+
def waw(self, word):
|
261 |
+
"""remove connective ‘و’ if it precedes a word beginning with ‘و’"""
|
262 |
+
if len(word) >= 4 and word[:2] == "\u0648\u0648":
|
263 |
+
word = word[1:]
|
264 |
+
return word
|
265 |
+
|
266 |
+
def pro_w4(self, word):
|
267 |
+
"""process length four patterns and extract length three roots"""
|
268 |
+
if word[0] in self.pr4[0]: # مفعل
|
269 |
+
word = word[1:]
|
270 |
+
elif word[1] in self.pr4[1]: # فاعل
|
271 |
+
word = word[:1] + word[2:]
|
272 |
+
elif word[2] in self.pr4[2]: # فعال - فعول - فعيل
|
273 |
+
word = word[:2] + word[3]
|
274 |
+
elif word[3] in self.pr4[3]: # فعلة
|
275 |
+
word = word[:-1]
|
276 |
+
else:
|
277 |
+
word = self.suf1(word) # do - normalize short sufix
|
278 |
+
if len(word) == 4:
|
279 |
+
word = self.pre1(word) # do - normalize short prefix
|
280 |
+
return word
|
281 |
+
|
282 |
+
def pro_w53(self, word):
|
283 |
+
"""process length five patterns and extract length three roots"""
|
284 |
+
if word[2] in self.pr53[0] and word[0] == "\u0627": # افتعل - افاعل
|
285 |
+
word = word[1] + word[3:]
|
286 |
+
elif word[3] in self.pr53[1] and word[0] == "\u0645": # مفعول - مفعال - مفعيل
|
287 |
+
word = word[1:3] + word[4]
|
288 |
+
elif word[0] in self.pr53[2] and word[4] == "\u0629": # مفعلة - تفعلة - افعلة
|
289 |
+
word = word[1:4]
|
290 |
+
elif word[0] in self.pr53[3] and word[2] == "\u062a": # مفتعل - يفتعل - تفتعل
|
291 |
+
word = word[1] + word[3:]
|
292 |
+
elif word[0] in self.pr53[4] and word[2] == "\u0627": # مفاعل - تفاعل
|
293 |
+
word = word[1] + word[3:]
|
294 |
+
elif word[2] in self.pr53[5] and word[4] == "\u0629": # فعولة - فعالة
|
295 |
+
word = word[:2] + word[3]
|
296 |
+
elif word[0] in self.pr53[6] and word[1] == "\u0646": # انفعل - منفعل
|
297 |
+
word = word[2:]
|
298 |
+
elif word[3] == "\u0627" and word[0] == "\u0627": # افعال
|
299 |
+
word = word[1:3] + word[4]
|
300 |
+
elif word[4] == "\u0646" and word[3] == "\u0627": # فعلان
|
301 |
+
word = word[:3]
|
302 |
+
elif word[3] == "\u064a" and word[0] == "\u062a": # تفعيل
|
303 |
+
word = word[1:3] + word[4]
|
304 |
+
elif word[3] == "\u0648" and word[1] == "\u0627": # فاعول
|
305 |
+
word = word[0] + word[2] + word[4]
|
306 |
+
elif word[2] == "\u0627" and word[1] == "\u0648": # فواعل
|
307 |
+
word = word[0] + word[3:]
|
308 |
+
elif word[3] == "\u0626" and word[2] == "\u0627": # فعائل
|
309 |
+
word = word[:2] + word[4]
|
310 |
+
elif word[4] == "\u0629" and word[1] == "\u0627": # فاعلة
|
311 |
+
word = word[0] + word[2:4]
|
312 |
+
elif word[4] == "\u064a" and word[2] == "\u0627": # فعالي
|
313 |
+
word = word[:2] + word[3]
|
314 |
+
else:
|
315 |
+
word = self.suf1(word) # do - normalize short sufix
|
316 |
+
if len(word) == 5:
|
317 |
+
word = self.pre1(word) # do - normalize short prefix
|
318 |
+
return word
|
319 |
+
|
320 |
+
def pro_w54(self, word):
|
321 |
+
"""process length five patterns and extract length four roots"""
|
322 |
+
if word[0] in self.pr53[2]: # تفعلل - افعلل - مفعلل
|
323 |
+
word = word[1:]
|
324 |
+
elif word[4] == "\u0629": # فعللة
|
325 |
+
word = word[:4]
|
326 |
+
elif word[2] == "\u0627": # فعالل
|
327 |
+
word = word[:2] + word[3:]
|
328 |
+
return word
|
329 |
+
|
330 |
+
def end_w5(self, word):
|
331 |
+
"""ending step (word of length five)"""
|
332 |
+
if len(word) == 4:
|
333 |
+
word = self.pro_w4(word)
|
334 |
+
elif len(word) == 5:
|
335 |
+
word = self.pro_w54(word)
|
336 |
+
return word
|
337 |
+
|
338 |
+
def pro_w6(self, word):
|
339 |
+
"""process length six patterns and extract length three roots"""
|
340 |
+
if word.startswith("\u0627\u0633\u062a") or word.startswith(
|
341 |
+
"\u0645\u0633\u062a"
|
342 |
+
): # مستفعل - استفعل
|
343 |
+
word = word[3:]
|
344 |
+
elif (
|
345 |
+
word[0] == "\u0645" and word[3] == "\u0627" and word[5] == "\u0629"
|
346 |
+
): # مفعالة
|
347 |
+
word = word[1:3] + word[4]
|
348 |
+
elif (
|
349 |
+
word[0] == "\u0627" and word[2] == "\u062a" and word[4] == "\u0627"
|
350 |
+
): # افتعال
|
351 |
+
word = word[1] + word[3] + word[5]
|
352 |
+
elif (
|
353 |
+
word[0] == "\u0627" and word[3] == "\u0648" and word[2] == word[4]
|
354 |
+
): # افعوعل
|
355 |
+
word = word[1] + word[4:]
|
356 |
+
elif (
|
357 |
+
word[0] == "\u062a" and word[2] == "\u0627" and word[4] == "\u064a"
|
358 |
+
): # تفاعيل new pattern
|
359 |
+
word = word[1] + word[3] + word[5]
|
360 |
+
else:
|
361 |
+
word = self.suf1(word) # do - normalize short sufix
|
362 |
+
if len(word) == 6:
|
363 |
+
word = self.pre1(word) # do - normalize short prefix
|
364 |
+
return word
|
365 |
+
|
366 |
+
def pro_w64(self, word):
|
367 |
+
"""process length six patterns and extract length four roots"""
|
368 |
+
if word[0] == "\u0627" and word[4] == "\u0627": # افعلال
|
369 |
+
word = word[1:4] + word[5]
|
370 |
+
elif word.startswith("\u0645\u062a"): # متفعلل
|
371 |
+
word = word[2:]
|
372 |
+
return word
|
373 |
+
|
374 |
+
def end_w6(self, word):
|
375 |
+
"""ending step (word of length six)"""
|
376 |
+
if len(word) == 5:
|
377 |
+
word = self.pro_w53(word)
|
378 |
+
word = self.end_w5(word)
|
379 |
+
elif len(word) == 6:
|
380 |
+
word = self.pro_w64(word)
|
381 |
+
return word
|
382 |
+
|
383 |
+
def suf1(self, word):
|
384 |
+
"""normalize short sufix"""
|
385 |
+
for sf1 in self.s1:
|
386 |
+
if word.endswith(sf1):
|
387 |
+
return word[:-1]
|
388 |
+
return word
|
389 |
+
|
390 |
+
def pre1(self, word):
|
391 |
+
"""normalize short prefix"""
|
392 |
+
for sp1 in self.p1:
|
393 |
+
if word.startswith(sp1):
|
394 |
+
return word[1:]
|
395 |
+
return word
|
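A minimal usage sketch for the ISRIStemmer defined above; the sample token and the expected root are illustrative assumptions of mine, and NLTK is assumed to be importable from this environment:

    # Stem a single Arabic token with the ISRI stemmer.
    from nltk.stem.isri import ISRIStemmer

    stemmer = ISRIStemmer()
    word = "\u064a\u0643\u062a\u0628\u0648\u0646"  # يكتبون ("they write"), an illustrative example
    print(stemmer.stem(word))  # should reduce toward the triliteral root كتب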
env-llmeval/lib/python3.10/site-packages/nltk/stem/rslp.py
ADDED
@@ -0,0 +1,137 @@
# Natural Language Toolkit: RSLP Stemmer
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Tiago Tresoldi <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

# This code is based on the algorithm presented in the paper "A Stemming
# Algorithm for the Portuguese Language" by Viviane Moreira Orengo and
# Christian Huyck, which unfortunately I had no access to. The code is a
# Python version, with some minor modifications of mine, to the description
# presented at https://www.webcitation.org/5NnvdIzOb and to the C source code
# available at http://www.inf.ufrgs.br/~arcoelho/rslp/integrando_rslp.html.
# Please note that this stemmer is intended for demonstration and educational
# purposes only. Feel free to write me for any comments, including the
# development of a different and/or better stemmer for Portuguese. I also
# suggest using NLTK's mailing list for Portuguese for any discussion.

# Este código é baseado no algoritmo apresentado no artigo "A Stemming
# Algorithm for the Portuguese Language" de Viviane Moreira Orengo e
# Christian Huyck, o qual infelizmente não tive a oportunidade de ler. O
# código é uma conversão para Python, com algumas pequenas modificações
# minhas, daquele apresentado em https://www.webcitation.org/5NnvdIzOb e do
# código para linguagem C disponível em
# http://www.inf.ufrgs.br/~arcoelho/rslp/integrando_rslp.html. Por favor,
# lembre-se de que este stemmer foi desenvolvido com finalidades unicamente
# de demonstração e didáticas. Sinta-se livre para me escrever para qualquer
# comentário, inclusive sobre o desenvolvimento de um stemmer diferente
# e/ou melhor para o português. Também sugiro utilizar-se a lista de discussão
# do NLTK para o português para qualquer debate.

from nltk.data import load
from nltk.stem.api import StemmerI


class RSLPStemmer(StemmerI):
    """
    A stemmer for Portuguese.

    >>> from nltk.stem import RSLPStemmer
    >>> st = RSLPStemmer()
    >>> # opening lines of Erico Verissimo's "Música ao Longe"
    >>> text = '''
    ... Clarissa risca com giz no quadro-negro a paisagem que os alunos
    ... devem copiar . Uma casinha de porta e janela , em cima duma
    ... coxilha .'''
    >>> for token in text.split(): # doctest: +NORMALIZE_WHITESPACE
    ...     print(st.stem(token))
    clariss risc com giz no quadro-negr a pais que os alun dev copi .
    uma cas de port e janel , em cim dum coxilh .
    """

    def __init__(self):
        self._model = []

        self._model.append(self.read_rule("step0.pt"))
        self._model.append(self.read_rule("step1.pt"))
        self._model.append(self.read_rule("step2.pt"))
        self._model.append(self.read_rule("step3.pt"))
        self._model.append(self.read_rule("step4.pt"))
        self._model.append(self.read_rule("step5.pt"))
        self._model.append(self.read_rule("step6.pt"))

    def read_rule(self, filename):
        rules = load("nltk:stemmers/rslp/" + filename, format="raw").decode("utf8")
        lines = rules.split("\n")

        lines = [line for line in lines if line != ""]  # remove blank lines
        lines = [line for line in lines if line[0] != "#"]  # remove comments

        # NOTE: a simple but ugly hack to make this parser happy with double '\t's
        lines = [line.replace("\t\t", "\t") for line in lines]

        # parse rules
        rules = []
        for line in lines:
            rule = []
            tokens = line.split("\t")

            # text to be searched for at the end of the string
            rule.append(tokens[0][1:-1])  # remove quotes

            # minimum stem size to perform the replacement
            rule.append(int(tokens[1]))

            # text to be replaced into
            rule.append(tokens[2][1:-1])  # remove quotes

            # exceptions to this rule
            rule.append([token[1:-1] for token in tokens[3].split(",")])

            # append to the results
            rules.append(rule)

        return rules

    def stem(self, word):
        word = word.lower()

        # the word ends in 's'? apply rule for plural reduction
        if word[-1] == "s":
            word = self.apply_rule(word, 0)

        # the word ends in 'a'? apply rule for feminine reduction
        if word[-1] == "a":
            word = self.apply_rule(word, 1)

        # augmentative reduction
        word = self.apply_rule(word, 3)

        # adverb reduction
        word = self.apply_rule(word, 2)

        # noun reduction
        prev_word = word
        word = self.apply_rule(word, 4)
        if word == prev_word:
            # verb reduction
            prev_word = word
            word = self.apply_rule(word, 5)
            if word == prev_word:
                # vowel removal
                word = self.apply_rule(word, 6)

        return word

    def apply_rule(self, word, rule_index):
        rules = self._model[rule_index]
        for rule in rules:
            suffix_length = len(rule[0])
            if word[-suffix_length:] == rule[0]:  # if suffix matches
                if len(word) >= suffix_length + rule[1]:  # if we have minimum size
                    if word not in rule[3]:  # if not an exception
                        word = word[:-suffix_length] + rule[2]
                        break

        return word
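A minimal usage sketch for the RSLPStemmer defined above; it assumes the 'rslp' rule tables are present in nltk_data (fetchable with nltk.download), and the sample words and expected stems are illustrative:

    # Stem a few Portuguese words with the RSLP stemmer.
    import nltk
    from nltk.stem import RSLPStemmer

    nltk.download("rslp", quiet=True)  # fetch the step0.pt ... step6.pt rule files if missing
    stemmer = RSLPStemmer()
    for w in ["meninas", "casinha", "copiar"]:
        print(w, "->", stemmer.stem(w))  # e.g. "meninas" is expected to reduce to "menin"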
env-llmeval/lib/python3.10/site-packages/nltk/test/__init__.py
ADDED
@@ -0,0 +1,18 @@
# Natural Language Toolkit: Unit Tests
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
Unit tests for the NLTK modules. These tests are intended to ensure
that source code changes don't accidentally introduce bugs.
For instructions, please see:

../../web/dev/local_testing.rst

https://github.com/nltk/nltk/blob/develop/web/dev/local_testing.rst


"""
env-llmeval/lib/python3.10/site-packages/nltk/test/all.py
ADDED
@@ -0,0 +1,25 @@
"""Test suite that runs all NLTK tests.

This module, `nltk.test.all`, is named as the NLTK ``test_suite`` in the
project's ``setup-eggs.py`` file. Here, we create a test suite that
runs all of our doctests, and return it for processing by the setuptools
test harness.

"""
import doctest
import os.path
import unittest
from glob import glob


def additional_tests():
    # print("here-000000000000000")
    # print("-----", glob(os.path.join(os.path.dirname(__file__), '*.doctest')))
    dir = os.path.dirname(__file__)
    paths = glob(os.path.join(dir, "*.doctest"))
    files = [os.path.basename(path) for path in paths]
    return unittest.TestSuite([doctest.DocFileSuite(file) for file in files])


# if os.path.split(path)[-1] != 'index.rst'
# skips time-dependent doctest in index.rst
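A minimal sketch of driving additional_tests() outside setuptools with the standard unittest runner; it assumes the process is started from the nltk/test directory so the relative *.doctest paths resolve, and that the corpora those doctests need are installed:

    # Collect the *.doctest files into a suite and run them with unittest.
    import unittest
    from nltk.test.all import additional_tests

    runner = unittest.TextTestRunner(verbosity=1)
    result = runner.run(additional_tests())
    print("ran", result.testsRun, "doctest files")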
env-llmeval/lib/python3.10/site-packages/nltk/test/ccg_semantics.doctest
ADDED
@@ -0,0 +1,552 @@
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
==============================================
|
5 |
+
Combinatory Categorial Grammar with semantics
|
6 |
+
==============================================
|
7 |
+
|
8 |
+
-----
|
9 |
+
Chart
|
10 |
+
-----
|
11 |
+
|
12 |
+
|
13 |
+
>>> from nltk.ccg import chart, lexicon
|
14 |
+
>>> from nltk.ccg.chart import printCCGDerivation
|
15 |
+
|
16 |
+
No semantics
|
17 |
+
-------------------
|
18 |
+
|
19 |
+
>>> lex = lexicon.fromstring('''
|
20 |
+
... :- S, NP, N
|
21 |
+
... She => NP
|
22 |
+
... has => (S\\NP)/NP
|
23 |
+
... books => NP
|
24 |
+
... ''',
|
25 |
+
... False)
|
26 |
+
|
27 |
+
>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
|
28 |
+
>>> parses = list(parser.parse("She has books".split()))
|
29 |
+
>>> print(str(len(parses)) + " parses")
|
30 |
+
3 parses
|
31 |
+
|
32 |
+
>>> printCCGDerivation(parses[0])
|
33 |
+
She has books
|
34 |
+
NP ((S\NP)/NP) NP
|
35 |
+
-------------------->
|
36 |
+
(S\NP)
|
37 |
+
-------------------------<
|
38 |
+
S
|
39 |
+
|
40 |
+
>>> printCCGDerivation(parses[1])
|
41 |
+
She has books
|
42 |
+
NP ((S\NP)/NP) NP
|
43 |
+
----->T
|
44 |
+
(S/(S\NP))
|
45 |
+
-------------------->
|
46 |
+
(S\NP)
|
47 |
+
------------------------->
|
48 |
+
S
|
49 |
+
|
50 |
+
|
51 |
+
>>> printCCGDerivation(parses[2])
|
52 |
+
She has books
|
53 |
+
NP ((S\NP)/NP) NP
|
54 |
+
----->T
|
55 |
+
(S/(S\NP))
|
56 |
+
------------------>B
|
57 |
+
(S/NP)
|
58 |
+
------------------------->
|
59 |
+
S
|
60 |
+
|
61 |
+
Simple semantics
|
62 |
+
-------------------
|
63 |
+
|
64 |
+
>>> lex = lexicon.fromstring('''
|
65 |
+
... :- S, NP, N
|
66 |
+
... She => NP {she}
|
67 |
+
... has => (S\\NP)/NP {\\x y.have(y, x)}
|
68 |
+
... a => NP/N {\\P.exists z.P(z)}
|
69 |
+
... book => N {book}
|
70 |
+
... ''',
|
71 |
+
... True)
|
72 |
+
|
73 |
+
>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
|
74 |
+
>>> parses = list(parser.parse("She has a book".split()))
|
75 |
+
>>> print(str(len(parses)) + " parses")
|
76 |
+
7 parses
|
77 |
+
|
78 |
+
>>> printCCGDerivation(parses[0])
|
79 |
+
She has a book
|
80 |
+
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
|
81 |
+
------------------------------------->
|
82 |
+
NP {exists z.book(z)}
|
83 |
+
------------------------------------------------------------------->
|
84 |
+
(S\NP) {\y.have(y,exists z.book(z))}
|
85 |
+
-----------------------------------------------------------------------------<
|
86 |
+
S {have(she,exists z.book(z))}
|
87 |
+
|
88 |
+
>>> printCCGDerivation(parses[1])
|
89 |
+
She has a book
|
90 |
+
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
|
91 |
+
--------------------------------------------------------->B
|
92 |
+
((S\NP)/N) {\P y.have(y,exists z.P(z))}
|
93 |
+
------------------------------------------------------------------->
|
94 |
+
(S\NP) {\y.have(y,exists z.book(z))}
|
95 |
+
-----------------------------------------------------------------------------<
|
96 |
+
S {have(she,exists z.book(z))}
|
97 |
+
|
98 |
+
>>> printCCGDerivation(parses[2])
|
99 |
+
She has a book
|
100 |
+
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
|
101 |
+
---------->T
|
102 |
+
(S/(S\NP)) {\F.F(she)}
|
103 |
+
------------------------------------->
|
104 |
+
NP {exists z.book(z)}
|
105 |
+
------------------------------------------------------------------->
|
106 |
+
(S\NP) {\y.have(y,exists z.book(z))}
|
107 |
+
----------------------------------------------------------------------------->
|
108 |
+
S {have(she,exists z.book(z))}
|
109 |
+
|
110 |
+
>>> printCCGDerivation(parses[3])
|
111 |
+
She has a book
|
112 |
+
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
|
113 |
+
---------->T
|
114 |
+
(S/(S\NP)) {\F.F(she)}
|
115 |
+
--------------------------------------------------------->B
|
116 |
+
((S\NP)/N) {\P y.have(y,exists z.P(z))}
|
117 |
+
------------------------------------------------------------------->
|
118 |
+
(S\NP) {\y.have(y,exists z.book(z))}
|
119 |
+
----------------------------------------------------------------------------->
|
120 |
+
S {have(she,exists z.book(z))}
|
121 |
+
|
122 |
+
>>> printCCGDerivation(parses[4])
|
123 |
+
She has a book
|
124 |
+
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
|
125 |
+
---------->T
|
126 |
+
(S/(S\NP)) {\F.F(she)}
|
127 |
+
---------------------------------------->B
|
128 |
+
(S/NP) {\x.have(she,x)}
|
129 |
+
------------------------------------->
|
130 |
+
NP {exists z.book(z)}
|
131 |
+
----------------------------------------------------------------------------->
|
132 |
+
S {have(she,exists z.book(z))}
|
133 |
+
|
134 |
+
>>> printCCGDerivation(parses[5])
|
135 |
+
She has a book
|
136 |
+
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
|
137 |
+
---------->T
|
138 |
+
(S/(S\NP)) {\F.F(she)}
|
139 |
+
--------------------------------------------------------->B
|
140 |
+
((S\NP)/N) {\P y.have(y,exists z.P(z))}
|
141 |
+
------------------------------------------------------------------->B
|
142 |
+
(S/N) {\P.have(she,exists z.P(z))}
|
143 |
+
----------------------------------------------------------------------------->
|
144 |
+
S {have(she,exists z.book(z))}
|
145 |
+
|
146 |
+
>>> printCCGDerivation(parses[6])
|
147 |
+
She has a book
|
148 |
+
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
|
149 |
+
---------->T
|
150 |
+
(S/(S\NP)) {\F.F(she)}
|
151 |
+
---------------------------------------->B
|
152 |
+
(S/NP) {\x.have(she,x)}
|
153 |
+
------------------------------------------------------------------->B
|
154 |
+
(S/N) {\P.have(she,exists z.P(z))}
|
155 |
+
----------------------------------------------------------------------------->
|
156 |
+
S {have(she,exists z.book(z))}
|
157 |
+
|
158 |
+
Complex semantics
|
159 |
+
-------------------
|
160 |
+
|
161 |
+
>>> lex = lexicon.fromstring('''
|
162 |
+
... :- S, NP, N
|
163 |
+
... She => NP {she}
|
164 |
+
... has => (S\\NP)/NP {\\x y.have(y, x)}
|
165 |
+
... a => ((S\\NP)\\((S\\NP)/NP))/N {\\P R x.(exists z.P(z) & R(z,x))}
|
166 |
+
... book => N {book}
|
167 |
+
... ''',
|
168 |
+
... True)
|
169 |
+
|
170 |
+
>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
|
171 |
+
>>> parses = list(parser.parse("She has a book".split()))
|
172 |
+
>>> print(str(len(parses)) + " parses")
|
173 |
+
2 parses
|
174 |
+
|
175 |
+
>>> printCCGDerivation(parses[0])
|
176 |
+
She has a book
|
177 |
+
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (((S\NP)\((S\NP)/NP))/N) {\P R x.(exists z.P(z) & R(z,x))} N {book}
|
178 |
+
---------------------------------------------------------------------->
|
179 |
+
((S\NP)\((S\NP)/NP)) {\R x.(exists z.book(z) & R(z,x))}
|
180 |
+
----------------------------------------------------------------------------------------------------<
|
181 |
+
(S\NP) {\x.(exists z.book(z) & have(x,z))}
|
182 |
+
--------------------------------------------------------------------------------------------------------------<
|
183 |
+
S {(exists z.book(z) & have(she,z))}
|
184 |
+
|
185 |
+
>>> printCCGDerivation(parses[1])
|
186 |
+
She has a book
|
187 |
+
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (((S\NP)\((S\NP)/NP))/N) {\P R x.(exists z.P(z) & R(z,x))} N {book}
|
188 |
+
---------->T
|
189 |
+
(S/(S\NP)) {\F.F(she)}
|
190 |
+
---------------------------------------------------------------------->
|
191 |
+
((S\NP)\((S\NP)/NP)) {\R x.(exists z.book(z) & R(z,x))}
|
192 |
+
----------------------------------------------------------------------------------------------------<
|
193 |
+
(S\NP) {\x.(exists z.book(z) & have(x,z))}
|
194 |
+
-------------------------------------------------------------------------------------------------------------->
|
195 |
+
S {(exists z.book(z) & have(she,z))}
|
196 |
+
|
197 |
+
Using conjunctions
|
198 |
+
---------------------
|
199 |
+
|
200 |
+
# TODO: The semantics of "and" should have been more flexible
|
201 |
+
>>> lex = lexicon.fromstring('''
|
202 |
+
... :- S, NP, N
|
203 |
+
... I => NP {I}
|
204 |
+
... cook => (S\\NP)/NP {\\x y.cook(x,y)}
|
205 |
+
... and => var\\.,var/.,var {\\P Q x y.(P(x,y) & Q(x,y))}
|
206 |
+
... eat => (S\\NP)/NP {\\x y.eat(x,y)}
|
207 |
+
... the => NP/N {\\x.the(x)}
|
208 |
+
... bacon => N {bacon}
|
209 |
+
... ''',
|
210 |
+
... True)
|
211 |
+
|
212 |
+
>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
|
213 |
+
>>> parses = list(parser.parse("I cook and eat the bacon".split()))
|
214 |
+
>>> print(str(len(parses)) + " parses")
|
215 |
+
7 parses
|
216 |
+
|
217 |
+
>>> printCCGDerivation(parses[0])
|
218 |
+
I cook and eat the bacon
|
219 |
+
NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
|
220 |
+
------------------------------------------------------------------------------------->
|
221 |
+
(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
|
222 |
+
-------------------------------------------------------------------------------------------------------------------<
|
223 |
+
((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
|
224 |
+
------------------------------->
|
225 |
+
NP {the(bacon)}
|
226 |
+
-------------------------------------------------------------------------------------------------------------------------------------------------->
|
227 |
+
(S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
|
228 |
+
----------------------------------------------------------------------------------------------------------------------------------------------------------<
|
229 |
+
S {(eat(the(bacon),I) & cook(the(bacon),I))}
|
230 |
+
|
231 |
+
>>> printCCGDerivation(parses[1])
|
232 |
+
I cook and eat the bacon
|
233 |
+
NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
|
234 |
+
------------------------------------------------------------------------------------->
|
235 |
+
(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
|
236 |
+
-------------------------------------------------------------------------------------------------------------------<
|
237 |
+
((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
|
238 |
+
--------------------------------------------------------------------------------------------------------------------------------------->B
|
239 |
+
((S\NP)/N) {\x y.(eat(the(x),y) & cook(the(x),y))}
|
240 |
+
-------------------------------------------------------------------------------------------------------------------------------------------------->
|
241 |
+
(S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
|
242 |
+
----------------------------------------------------------------------------------------------------------------------------------------------------------<
|
243 |
+
S {(eat(the(bacon),I) & cook(the(bacon),I))}
|
244 |
+
|
245 |
+
>>> printCCGDerivation(parses[2])
|
246 |
+
I cook and eat the bacon
|
247 |
+
NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
|
248 |
+
-------->T
|
249 |
+
(S/(S\NP)) {\F.F(I)}
|
250 |
+
------------------------------------------------------------------------------------->
|
251 |
+
(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
|
252 |
+
-------------------------------------------------------------------------------------------------------------------<
|
253 |
+
((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
|
254 |
+
------------------------------->
|
255 |
+
NP {the(bacon)}
|
256 |
+
-------------------------------------------------------------------------------------------------------------------------------------------------->
|
257 |
+
(S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
|
258 |
+
---------------------------------------------------------------------------------------------------------------------------------------------------------->
|
259 |
+
S {(eat(the(bacon),I) & cook(the(bacon),I))}
|
260 |
+
|
261 |
+
>>> printCCGDerivation(parses[3])
|
262 |
+
I cook and eat the bacon
|
263 |
+
NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
|
264 |
+
-------->T
|
265 |
+
(S/(S\NP)) {\F.F(I)}
|
266 |
+
------------------------------------------------------------------------------------->
|
267 |
+
(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
|
268 |
+
-------------------------------------------------------------------------------------------------------------------<
|
269 |
+
((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
|
270 |
+
--------------------------------------------------------------------------------------------------------------------------------------->B
|
271 |
+
((S\NP)/N) {\x y.(eat(the(x),y) & cook(the(x),y))}
|
272 |
+
-------------------------------------------------------------------------------------------------------------------------------------------------->
|
273 |
+
(S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
|
274 |
+
---------------------------------------------------------------------------------------------------------------------------------------------------------->
|
275 |
+
S {(eat(the(bacon),I) & cook(the(bacon),I))}
|
276 |
+
|
277 |
+
>>> printCCGDerivation(parses[4])
|
278 |
+
I cook and eat the bacon
|
279 |
+
NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
|
280 |
+
-------->T
|
281 |
+
(S/(S\NP)) {\F.F(I)}
|
282 |
+
------------------------------------------------------------------------------------->
|
283 |
+
(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
|
284 |
+
-------------------------------------------------------------------------------------------------------------------<
|
285 |
+
((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
|
286 |
+
--------------------------------------------------------------------------------------------------------------------------->B
|
287 |
+
(S/NP) {\x.(eat(x,I) & cook(x,I))}
|
288 |
+
------------------------------->
|
289 |
+
NP {the(bacon)}
|
290 |
+
---------------------------------------------------------------------------------------------------------------------------------------------------------->
|
291 |
+
S {(eat(the(bacon),I) & cook(the(bacon),I))}
|
292 |
+
|
293 |
+
>>> printCCGDerivation(parses[5])
|
294 |
+
I cook and eat the bacon
|
295 |
+
NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
|
296 |
+
-------->T
|
297 |
+
(S/(S\NP)) {\F.F(I)}
|
298 |
+
------------------------------------------------------------------------------------->
|
299 |
+
(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
|
300 |
+
-------------------------------------------------------------------------------------------------------------------<
|
301 |
+
((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
|
302 |
+
--------------------------------------------------------------------------------------------------------------------------------------->B
|
303 |
+
((S\NP)/N) {\x y.(eat(the(x),y) & cook(the(x),y))}
|
304 |
+
----------------------------------------------------------------------------------------------------------------------------------------------->B
|
305 |
+
(S/N) {\x.(eat(the(x),I) & cook(the(x),I))}
|
306 |
+
---------------------------------------------------------------------------------------------------------------------------------------------------------->
|
307 |
+
S {(eat(the(bacon),I) & cook(the(bacon),I))}
|
308 |
+
|
309 |
+
>>> printCCGDerivation(parses[6])
|
310 |
+
I cook and eat the bacon
|
311 |
+
NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
|
312 |
+
-------->T
|
313 |
+
(S/(S\NP)) {\F.F(I)}
|
314 |
+
------------------------------------------------------------------------------------->
|
315 |
+
(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
|
316 |
+
-------------------------------------------------------------------------------------------------------------------<
|
317 |
+
((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
|
318 |
+
--------------------------------------------------------------------------------------------------------------------------->B
|
319 |
+
(S/NP) {\x.(eat(x,I) & cook(x,I))}
|
320 |
+
----------------------------------------------------------------------------------------------------------------------------------------------->B
|
321 |
+
(S/N) {\x.(eat(the(x),I) & cook(the(x),I))}
|
322 |
+
---------------------------------------------------------------------------------------------------------------------------------------------------------->
|
323 |
+
S {(eat(the(bacon),I) & cook(the(bacon),I))}
|
324 |
+
|
325 |
+
Tests from published papers
|
326 |
+
------------------------------
|
327 |
+
|
328 |
+
An example from "CCGbank: A Corpus of CCG Derivations and Dependency Structures Extracted from the Penn Treebank", Hockenmaier and Steedman, 2007, Page 359, https://www.aclweb.org/anthology/J/J07/J07-3004.pdf
|
329 |
+
|
330 |
+
>>> lex = lexicon.fromstring('''
|
331 |
+
... :- S, NP
|
332 |
+
... I => NP {I}
|
333 |
+
... give => ((S\\NP)/NP)/NP {\\x y z.give(y,x,z)}
|
334 |
+
... them => NP {them}
|
335 |
+
... money => NP {money}
|
336 |
+
... ''',
|
337 |
+
... True)
|
338 |
+
|
339 |
+
>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
|
340 |
+
>>> parses = list(parser.parse("I give them money".split()))
|
341 |
+
>>> print(str(len(parses)) + " parses")
|
342 |
+
3 parses
|
343 |
+
|
344 |
+
>>> printCCGDerivation(parses[0])
|
345 |
+
I give them money
|
346 |
+
NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them} NP {money}
|
347 |
+
-------------------------------------------------->
|
348 |
+
((S\NP)/NP) {\y z.give(y,them,z)}
|
349 |
+
-------------------------------------------------------------->
|
350 |
+
(S\NP) {\z.give(money,them,z)}
|
351 |
+
----------------------------------------------------------------------<
|
352 |
+
S {give(money,them,I)}
|
353 |
+
|
354 |
+
>>> printCCGDerivation(parses[1])
|
355 |
+
I give them money
|
356 |
+
NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them} NP {money}
|
357 |
+
-------->T
|
358 |
+
(S/(S\NP)) {\F.F(I)}
|
359 |
+
-------------------------------------------------->
|
360 |
+
((S\NP)/NP) {\y z.give(y,them,z)}
|
361 |
+
-------------------------------------------------------------->
|
362 |
+
(S\NP) {\z.give(money,them,z)}
|
363 |
+
---------------------------------------------------------------------->
|
364 |
+
S {give(money,them,I)}
|
365 |
+
|
366 |
+
|
367 |
+
>>> printCCGDerivation(parses[2])
|
368 |
+
I give them money
|
369 |
+
NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them} NP {money}
|
370 |
+
-------->T
|
371 |
+
(S/(S\NP)) {\F.F(I)}
|
372 |
+
-------------------------------------------------->
|
373 |
+
((S\NP)/NP) {\y z.give(y,them,z)}
|
374 |
+
---------------------------------------------------------->B
|
375 |
+
(S/NP) {\y.give(y,them,I)}
|
376 |
+
---------------------------------------------------------------------->
|
377 |
+
S {give(money,them,I)}
|
378 |
+
|
379 |
+
|
380 |
+
An example from "CCGbank: A Corpus of CCG Derivations and Dependency Structures Extracted from the Penn Treebank", Hockenmaier and Steedman, 2007, Page 359, https://www.aclweb.org/anthology/J/J07/J07-3004.pdf
|
381 |
+
|
382 |
+
>>> lex = lexicon.fromstring('''
|
383 |
+
... :- N, NP, S
|
384 |
+
... money => N {money}
|
385 |
+
... that => (N\\N)/(S/NP) {\\P Q x.(P(x) & Q(x))}
|
386 |
+
... I => NP {I}
|
387 |
+
... give => ((S\\NP)/NP)/NP {\\x y z.give(y,x,z)}
|
388 |
+
... them => NP {them}
|
389 |
+
... ''',
|
390 |
+
... True)
|
391 |
+
|
392 |
+
>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
|
393 |
+
>>> parses = list(parser.parse("money that I give them".split()))
|
394 |
+
>>> print(str(len(parses)) + " parses")
|
395 |
+
3 parses
|
396 |
+
|
397 |
+
>>> printCCGDerivation(parses[0])
|
398 |
+
money that I give them
|
399 |
+
N {money} ((N\N)/(S/NP)) {\P Q x.(P(x) & Q(x))} NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them}
|
400 |
+
-------->T
|
401 |
+
(S/(S\NP)) {\F.F(I)}
|
402 |
+
-------------------------------------------------->
|
403 |
+
((S\NP)/NP) {\y z.give(y,them,z)}
|
404 |
+
---------------------------------------------------------->B
|
405 |
+
(S/NP) {\y.give(y,them,I)}
|
406 |
+
------------------------------------------------------------------------------------------------->
|
407 |
+
(N\N) {\Q x.(give(x,them,I) & Q(x))}
|
408 |
+
------------------------------------------------------------------------------------------------------------<
|
409 |
+
N {\x.(give(x,them,I) & money(x))}
|
410 |
+
|
411 |
+
>>> printCCGDerivation(parses[1])
|
412 |
+
money that I give them
|
413 |
+
N {money} ((N\N)/(S/NP)) {\P Q x.(P(x) & Q(x))} NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them}
|
414 |
+
----------->T
|
415 |
+
(N/(N\N)) {\F.F(money)}
|
416 |
+
-------->T
|
417 |
+
(S/(S\NP)) {\F.F(I)}
|
418 |
+
-------------------------------------------------->
|
419 |
+
((S\NP)/NP) {\y z.give(y,them,z)}
|
420 |
+
---------------------------------------------------------->B
|
421 |
+
(S/NP) {\y.give(y,them,I)}
|
422 |
+
------------------------------------------------------------------------------------------------->
|
423 |
+
(N\N) {\Q x.(give(x,them,I) & Q(x))}
|
424 |
+
------------------------------------------------------------------------------------------------------------>
|
425 |
+
N {\x.(give(x,them,I) & money(x))}
|
426 |
+
|
427 |
+
>>> printCCGDerivation(parses[2])
|
428 |
+
money that I give them
|
429 |
+
N {money} ((N\N)/(S/NP)) {\P Q x.(P(x) & Q(x))} NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them}
|
430 |
+
----------->T
|
431 |
+
(N/(N\N)) {\F.F(money)}
|
432 |
+
-------------------------------------------------->B
|
433 |
+
(N/(S/NP)) {\P x.(P(x) & money(x))}
|
434 |
+
-------->T
|
435 |
+
(S/(S\NP)) {\F.F(I)}
|
436 |
+
-------------------------------------------------->
|
437 |
+
((S\NP)/NP) {\y z.give(y,them,z)}
|
438 |
+
---------------------------------------------------------->B
|
439 |
+
(S/NP) {\y.give(y,them,I)}
|
440 |
+
------------------------------------------------------------------------------------------------------------>
|
441 |
+
N {\x.(give(x,them,I) & money(x))}
|
442 |
+
|
443 |
+
|
444 |
+
-------
|
445 |
+
Lexicon
|
446 |
+
-------
|
447 |
+
|
448 |
+
>>> from nltk.ccg import lexicon
|
449 |
+
|
450 |
+
Parse lexicon with semantics
|
451 |
+
|
452 |
+
>>> print(str(lexicon.fromstring(
|
453 |
+
... '''
|
454 |
+
... :- S,NP
|
455 |
+
...
|
456 |
+
... IntransVsg :: S\\NP[sg]
|
457 |
+
...
|
458 |
+
... sleeps => IntransVsg {\\x.sleep(x)}
|
459 |
+
... eats => S\\NP[sg]/NP {\\x y.eat(x,y)}
|
460 |
+
...
|
461 |
+
... and => var\\var/var {\\x y.x & y}
|
462 |
+
... ''',
|
463 |
+
... True
|
464 |
+
... )))
|
465 |
+
and => ((_var0\_var0)/_var0) {(\x y.x & y)}
|
466 |
+
eats => ((S\NP['sg'])/NP) {\x y.eat(x,y)}
|
467 |
+
sleeps => (S\NP['sg']) {\x.sleep(x)}
|
468 |
+
|
469 |
+
Parse lexicon without semantics
|
470 |
+
|
471 |
+
>>> print(str(lexicon.fromstring(
|
472 |
+
... '''
|
473 |
+
... :- S,NP
|
474 |
+
...
|
475 |
+
... IntransVsg :: S\\NP[sg]
|
476 |
+
...
|
477 |
+
... sleeps => IntransVsg
|
478 |
+
... eats => S\\NP[sg]/NP {sem=\\x y.eat(x,y)}
|
479 |
+
...
|
480 |
+
... and => var\\var/var
|
481 |
+
... ''',
|
482 |
+
... False
|
483 |
+
... )))
|
484 |
+
and => ((_var0\_var0)/_var0)
|
485 |
+
eats => ((S\NP['sg'])/NP)
|
486 |
+
sleeps => (S\NP['sg'])
|
487 |
+
|
488 |
+
Semantics are missing
|
489 |
+
|
490 |
+
>>> print(str(lexicon.fromstring(
|
491 |
+
... '''
|
492 |
+
... :- S,NP
|
493 |
+
...
|
494 |
+
... eats => S\\NP[sg]/NP
|
495 |
+
... ''',
|
496 |
+
... True
|
497 |
+
... )))
|
498 |
+
Traceback (most recent call last):
|
499 |
+
...
|
500 |
+
AssertionError: eats => S\NP[sg]/NP must contain semantics because include_semantics is set to True
|
501 |
+
|
502 |
+
|
503 |
+
------------------------------------
|
504 |
+
CCG combinator semantics computation
|
505 |
+
------------------------------------
|
506 |
+
|
507 |
+
>>> from nltk.sem.logic import *
|
508 |
+
>>> from nltk.ccg.logic import *
|
509 |
+
|
510 |
+
>>> read_expr = Expression.fromstring
|
511 |
+
|
512 |
+
Compute semantics from function application
|
513 |
+
|
514 |
+
>>> print(str(compute_function_semantics(read_expr(r'\x.P(x)'), read_expr(r'book'))))
|
515 |
+
P(book)
|
516 |
+
|
517 |
+
>>> print(str(compute_function_semantics(read_expr(r'\P.P(book)'), read_expr(r'read'))))
|
518 |
+
read(book)
|
519 |
+
|
520 |
+
>>> print(str(compute_function_semantics(read_expr(r'\P.P(book)'), read_expr(r'\x.read(x)'))))
|
521 |
+
read(book)
|
522 |
+
|
523 |
+
Compute semantics from composition
|
524 |
+
|
525 |
+
>>> print(str(compute_composition_semantics(read_expr(r'\x.P(x)'), read_expr(r'\x.Q(x)'))))
|
526 |
+
\x.P(Q(x))
|
527 |
+
|
528 |
+
>>> print(str(compute_composition_semantics(read_expr(r'\x.P(x)'), read_expr(r'read'))))
|
529 |
+
Traceback (most recent call last):
|
530 |
+
...
|
531 |
+
AssertionError: `read` must be a lambda expression
|
532 |
+
|
533 |
+
Compute semantics from substitution
|
534 |
+
|
535 |
+
>>> print(str(compute_substitution_semantics(read_expr(r'\x y.P(x,y)'), read_expr(r'\x.Q(x)'))))
|
536 |
+
\x.P(x,Q(x))
|
537 |
+
|
538 |
+
>>> print(str(compute_substitution_semantics(read_expr(r'\x.P(x)'), read_expr(r'read'))))
|
539 |
+
Traceback (most recent call last):
|
540 |
+
...
|
541 |
+
AssertionError: `\x.P(x)` must be a lambda expression with 2 arguments
|
542 |
+
|
543 |
+
Compute type-raise semantics
|
544 |
+
|
545 |
+
>>> print(str(compute_type_raised_semantics(read_expr(r'\x.P(x)'))))
|
546 |
+
\F x.F(P(x))
|
547 |
+
|
548 |
+
>>> print(str(compute_type_raised_semantics(read_expr(r'\x.F(x)'))))
|
549 |
+
\F1 x.F1(F(x))
|
550 |
+
|
551 |
+
>>> print(str(compute_type_raised_semantics(read_expr(r'\x y z.P(x,y,z)'))))
|
552 |
+
\F x y z.F(P(x,y,z))
|
env-llmeval/lib/python3.10/site-packages/nltk/test/childes.doctest
ADDED
@@ -0,0 +1,190 @@
=======================
 CHILDES Corpus Readers
=======================

Read the XML version of the CHILDES corpus.

Setup
=====

>>> from nltk.test.childes_fixt import setup_module
>>> setup_module()

How to use CHILDESCorpusReader
==============================

Read the CHILDESCorpusReader class and read the CHILDES corpus saved in
the nltk_data directory.

>>> import nltk
>>> from nltk.corpus.reader import CHILDESCorpusReader
>>> corpus_root = nltk.data.find('corpora/childes/data-xml/Eng-USA-MOR/')

Reading files in the Valian corpus (Valian, 1991).

>>> valian = CHILDESCorpusReader(corpus_root, 'Valian/.*.xml')
>>> valian.fileids()
['Valian/01a.xml', 'Valian/01b.xml', 'Valian/02a.xml', 'Valian/02b.xml',...

Count the number of files

>>> len(valian.fileids())
43

Printing properties of the corpus files.

>>> corpus_data = valian.corpus(valian.fileids())
>>> print(corpus_data[0]['Lang'])
eng
>>> for key in sorted(corpus_data[0].keys()):
...    print(key, ": ", corpus_data[0][key])
Corpus : valian
Date : 1986-03-04
Id : 01a
Lang : eng
Version : 2.0.1
{http://www.w3.org/2001/XMLSchema-instance}schemaLocation : http://www.talkbank.org/ns/talkbank http://talkbank.org/software/talkbank.xsd

Printing information of participants of the corpus. The most common codes for
the participants are 'CHI' (target child), 'MOT' (mother), and 'INV' (investigator).

>>> corpus_participants = valian.participants(valian.fileids())
>>> for this_corpus_participants in corpus_participants[:2]:
...     for key in sorted(this_corpus_participants.keys()):
...         dct = this_corpus_participants[key]
...         print(key, ": ", [(k, dct[k]) for k in sorted(dct.keys())])
CHI : [('age', 'P2Y1M3D'), ('group', 'normal'), ('id', 'CHI'), ('language', 'eng'), ('role', 'Target_Child'), ('sex', 'female')]
INV : [('id', 'INV'), ('language', 'eng'), ('role', 'Investigator')]
MOT : [('id', 'MOT'), ('language', 'eng'), ('role', 'Mother')]
CHI : [('age', 'P2Y1M12D'), ('group', 'normal'), ('id', 'CHI'), ('language', 'eng'), ('role', 'Target_Child'), ('sex', 'female')]
INV : [('id', 'INV'), ('language', 'eng'), ('role', 'Investigator')]
MOT : [('id', 'MOT'), ('language', 'eng'), ('role', 'Mother')]

printing words.

>>> valian.words('Valian/01a.xml')
['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname', ...

printing sentences.

>>> valian.sents('Valian/01a.xml')
[['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname',
'and', 'it', 'is', 'March', 'fourth', 'I', 'believe', 'and', 'when',
'was', "Parent's", 'birthday'], ["Child's"], ['oh', "I'm", 'sorry'],
["that's", 'okay'], ...

You can specify the participants with the argument *speaker*.

>>> valian.words('Valian/01a.xml',speaker=['INV'])
['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname', ...
>>> valian.words('Valian/01a.xml',speaker=['MOT'])
["Child's", "that's", 'okay', 'February', 'first', 'nineteen', ...
>>> valian.words('Valian/01a.xml',speaker=['CHI'])
['tape', 'it', 'up', 'and', 'two', 'tape', 'players', 'have',...


tagged_words() and tagged_sents() return the usual (word,pos) tuple lists.
POS tags in the CHILDES are automatically assigned by MOR and POST programs
(MacWhinney, 2000).

>>> valian.tagged_words('Valian/01a.xml')[:30]
[('at', 'prep'), ('Parent', 'n:prop'), ("Lastname's", 'n:prop'), ('house', 'n'),
('with', 'prep'), ('Child', 'n:prop'), ('Lastname', 'n:prop'), ('and', 'coord'),
('it', 'pro'), ('is', 'v:cop'), ('March', 'n:prop'), ('fourth', 'adj'),
('I', 'pro:sub'), ('believe', 'v'), ('and', 'coord'), ('when', 'adv:wh'),
('was', 'v:cop'), ("Parent's", 'n:prop'), ('birthday', 'n'), ("Child's", 'n:prop'),
('oh', 'co'), ("I'm", 'pro:sub'), ('sorry', 'adj'), ("that's", 'pro:dem'),
('okay', 'adj'), ('February', 'n:prop'), ('first', 'adj'),
('nineteen', 'det:num'), ('eighty', 'det:num'), ('four', 'det:num')]

>>> valian.tagged_sents('Valian/01a.xml')[:10]
[[('at', 'prep'), ('Parent', 'n:prop'), ("Lastname's", 'n:prop'), ('house', 'n'),
('with', 'prep'), ('Child', 'n:prop'), ('Lastname', 'n:prop'), ('and', 'coord'),
('it', 'pro'), ('is', 'v:cop'), ('March', 'n:prop'), ('fourth', 'adj'),
('I', 'pro:sub'), ('believe', 'v'), ('and', 'coord'), ('when', 'adv:wh'),
('was', 'v:cop'), ("Parent's", 'n:prop'), ('birthday', 'n')],
[("Child's", 'n:prop')], [('oh', 'co'), ("I'm", 'pro:sub'), ('sorry', 'adj')],
[("that's", 'pro:dem'), ('okay', 'adj')],
[('February', 'n:prop'), ('first', 'adj'), ('nineteen', 'det:num'),
('eighty', 'det:num'), ('four', 'det:num')],
[('great', 'adj')],
[('and', 'coord'), ("she's", 'pro:sub'), ('two', 'det:num'), ('years', 'n'), ('old', 'adj')],
[('correct', 'adj')],
[('okay', 'co')], [('she', 'pro:sub'), ('just', 'adv:int'), ('turned', 'part'), ('two', 'det:num'),
('a', 'det'), ('month', 'n'), ('ago', 'adv')]]

When the argument *stem* is true, the word stems (e.g., 'is' -> 'be-3PS') are
used instead of the original words.

>>> valian.words('Valian/01a.xml')[:30]
['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname', 'and', 'it', 'is', ...
>>> valian.words('Valian/01a.xml',stem=True)[:30]
['at', 'Parent', 'Lastname', 's', 'house', 'with', 'Child', 'Lastname', 'and', 'it', 'be-3S', ...

When the argument *replace* is true, the replaced words are used instead of
the original words.

>>> valian.words('Valian/01a.xml',speaker='CHI')[247]
'tikteat'
>>> valian.words('Valian/01a.xml',speaker='CHI',replace=True)[247]
'trick'

When the argument *relation* is true, the relational relationships in the
sentence are returned. See Sagae et al. (2010) for details of the relational
structure adopted in the CHILDES.

>>> valian.words('Valian/01a.xml',relation=True)[:10]
[[('at', 'prep', '1|0|ROOT'), ('Parent', 'n', '2|5|VOC'), ('Lastname', 'n', '3|5|MOD'), ('s', 'poss', '4|5|MOD'), ('house', 'n', '5|1|POBJ'), ('with', 'prep', '6|1|JCT'), ('Child', 'n', '7|8|NAME'), ('Lastname', 'n', '8|6|POBJ'), ('and', 'coord', '9|8|COORD'), ('it', 'pro', '10|11|SUBJ'), ('be-3S', 'v', '11|9|COMP'), ('March', 'n', '12|11|PRED'), ('fourth', 'adj', '13|12|MOD'), ('I', 'pro', '15|16|SUBJ'), ('believe', 'v', '16|14|ROOT'), ('and', 'coord', '18|17|ROOT'), ('when', 'adv', '19|20|PRED'), ('be-PAST', 'v', '20|18|COMP'), ('Parent', 'n', '21|23|MOD'), ('s', 'poss', '22|23|MOD'), ('birth', 'n', '23|20|SUBJ')], [('Child', 'n', '1|2|MOD'), ('s', 'poss', '2|0|ROOT')], [('oh', 'co', '1|4|COM'), ('I', 'pro', '3|4|SUBJ'), ('be', 'v', '4|0|ROOT'), ('sorry', 'adj', '5|4|PRED')], [('that', 'pro', '1|2|SUBJ'), ('be', 'v', '2|0|ROOT'), ('okay', 'adj', '3|2|PRED')], [('February', 'n', '1|6|VOC'), ('first', 'adj', '2|6|ENUM'), ('nineteen', 'det', '4|6|ENUM'), ('eighty', 'det', '5|6|ENUM'), ('four', 'det', '6|0|ROOT')], [('great', 'adj', '1|0|ROOT')], [('and', 'coord', '1|0|ROOT'), ('she', 'pro', '2|1|ROOT'), ('be', 'aux', '3|5|AUX'), ('two', 'det', '4|5|QUANT'), ('year-PL', 'n', '5|2|ROOT'), ('old', 'adj', '6|5|MOD')], [('correct', 'adj', '1|0|ROOT')], [('okay', 'co', '1|0|ROOT')], [('she', 'pro', '1|0|ROOT'), ('just', 'adv', '2|3|JCT'), ('turn-PERF', 'part', '3|1|XCOMP'), ('two', 'det', '4|6|QUANT'), ('a', 'det', '5|6|DET'), ('month', 'n', '6|3|OBJ'), ('ago', 'adv', '7|3|JCT')]]

Printing age. When the argument *month* is true, the age information in
the CHILDES format is converted into the number of months.

>>> valian.age()
['P2Y1M3D', 'P2Y1M12D', 'P1Y9M21D', 'P1Y9M28D', 'P2Y1M23D', ...
>>> valian.age('Valian/01a.xml')
['P2Y1M3D']
>>> valian.age('Valian/01a.xml',month=True)
[25]

Printing MLU. The criteria for the MLU computation is broadly based on
Brown (1973).

>>> valian.MLU()
[2.3574660633484..., 2.292682926829..., 3.492857142857..., 2.961783439490...,
2.0842696629213..., 3.169811320754..., 3.137404580152..., 3.0578034682080...,
4.090163934426..., 3.488372093023..., 2.8773584905660..., 3.4792899408284...,
4.0111940298507..., 3.456790123456..., 4.487603305785..., 4.007936507936...,
5.25, 5.154696132596..., ...]

>>> valian.MLU('Valian/01a.xml')
[2.35746606334...]


Basic stuff
==============================

Count the number of words and sentences of each file.

>>> valian = CHILDESCorpusReader(corpus_root, 'Valian/.*.xml')
>>> for this_file in valian.fileids()[:6]:
...     print(valian.corpus(this_file)[0]['Corpus'], valian.corpus(this_file)[0]['Id'])
...     print("num of words: %i" % len(valian.words(this_file)))
...     print("num of sents: %i" % len(valian.sents(this_file)))
valian 01a
num of words: 3606
num of sents: 1027
valian 01b
num of words: 4376
num of sents: 1274
valian 02a
num of words: 2673
num of sents: 801
valian 02b
num of words: 5020
num of sents: 1583
valian 03a
num of words: 2743
num of sents: 988
valian 03b
num of words: 4409
num of sents: 1397
env-llmeval/lib/python3.10/site-packages/nltk/test/childes_fixt.py
ADDED
@@ -0,0 +1,13 @@
def setup_module():
    import pytest

    import nltk.data

    try:
        nltk.data.find("corpora/childes/data-xml/Eng-USA-MOR/")
    except LookupError as e:
        pytest.skip(
            "The CHILDES corpus is not found. "
            "It should be manually downloaded and saved/unpacked "
            "to [NLTK_Data_Dir]/corpora/childes/"
        )
env-llmeval/lib/python3.10/site-packages/nltk/test/chunk.doctest
ADDED
@@ -0,0 +1,372 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

==========
Chunking
==========

>>> from nltk.chunk import *
>>> from nltk.chunk.util import *
>>> from nltk.chunk.regexp import *
>>> from nltk import Tree

>>> tagged_text = "[ The/DT cat/NN ] sat/VBD on/IN [ the/DT mat/NN ] [ the/DT dog/NN ] chewed/VBD ./."
>>> gold_chunked_text = tagstr2tree(tagged_text)
>>> unchunked_text = gold_chunked_text.flatten()

Chunking uses a special regexp syntax for rules that delimit the chunks. These
rules must be converted to 'regular' regular expressions before a sentence can
be chunked.

>>> tag_pattern = "<DT>?<JJ>*<NN.*>"
>>> regexp_pattern = tag_pattern2re_pattern(tag_pattern)
>>> regexp_pattern
'(<(DT)>)?(<(JJ)>)*(<(NN[^\\{\\}<>]*)>)'
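As a quick aside (not part of the doctest above), the converted pattern is an ordinary regular expression over the angle-bracketed tag string that `ChunkString` maintains internally, so it can be exercised directly with `re`::

    import re
    from nltk.chunk.regexp import tag_pattern2re_pattern

    pattern = tag_pattern2re_pattern("<DT>?<JJ>*<NN.*>")
    # The pattern matches a run of tags such as '<DT><JJ><NN>' in a tag string.
    print(re.match(pattern, "<DT><JJ><NN>").group())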

Construct some new chunking rules.

>>> chunk_rule = ChunkRule(r"<.*>+", "Chunk everything")
>>> strip_rule = StripRule(r"<VBD|IN|\.>", "Strip on verbs/prepositions")
>>> split_rule = SplitRule("<DT><NN>", "<DT><NN>",
...     "Split successive determiner/noun pairs")


Create and score a series of chunk parsers, successively more complex.

>>> chunk_parser = RegexpChunkParser([chunk_rule], chunk_label='NP')
>>> chunked_text = chunk_parser.parse(unchunked_text)
>>> print(chunked_text)
(S
  (NP
    The/DT
    cat/NN
    sat/VBD
    on/IN
    the/DT
    mat/NN
    the/DT
    dog/NN
    chewed/VBD
    ./.))

>>> chunkscore = ChunkScore()
>>> chunkscore.score(gold_chunked_text, chunked_text)
>>> print(chunkscore.precision())
0.0

>>> print(chunkscore.recall())
0.0

>>> print(chunkscore.f_measure())
0

>>> for chunk in sorted(chunkscore.missed()): print(chunk)
(NP The/DT cat/NN)
(NP the/DT dog/NN)
(NP the/DT mat/NN)

>>> for chunk in chunkscore.incorrect(): print(chunk)
(NP
  The/DT
  cat/NN
  sat/VBD
  on/IN
  the/DT
  mat/NN
  the/DT
  dog/NN
  chewed/VBD
  ./.)

>>> chunk_parser = RegexpChunkParser([chunk_rule, strip_rule],
...     chunk_label='NP')
>>> chunked_text = chunk_parser.parse(unchunked_text)
>>> print(chunked_text)
(S
  (NP The/DT cat/NN)
  sat/VBD
  on/IN
  (NP the/DT mat/NN the/DT dog/NN)
  chewed/VBD
  ./.)
>>> assert chunked_text == chunk_parser.parse(list(unchunked_text))

>>> chunkscore = ChunkScore()
>>> chunkscore.score(gold_chunked_text, chunked_text)
>>> chunkscore.precision()
0.5

>>> print(chunkscore.recall())
0.33333333...

>>> print(chunkscore.f_measure())
0.4

>>> for chunk in sorted(chunkscore.missed()): print(chunk)
(NP the/DT dog/NN)
(NP the/DT mat/NN)

>>> for chunk in chunkscore.incorrect(): print(chunk)
(NP the/DT mat/NN the/DT dog/NN)

>>> chunk_parser = RegexpChunkParser([chunk_rule, strip_rule, split_rule],
...     chunk_label='NP')
>>> chunked_text = chunk_parser.parse(unchunked_text, trace=True)
# Input:
<DT> <NN> <VBD> <IN> <DT> <NN> <DT> <NN> <VBD> <.>
# Chunk everything:
{<DT> <NN> <VBD> <IN> <DT> <NN> <DT> <NN> <VBD> <.>}
# Strip on verbs/prepositions:
{<DT> <NN>} <VBD> <IN> {<DT> <NN> <DT> <NN>} <VBD> <.>
# Split successive determiner/noun pairs:
{<DT> <NN>} <VBD> <IN> {<DT> <NN>}{<DT> <NN>} <VBD> <.>
>>> print(chunked_text)
(S
  (NP The/DT cat/NN)
  sat/VBD
  on/IN
  (NP the/DT mat/NN)
  (NP the/DT dog/NN)
  chewed/VBD
  ./.)

>>> chunkscore = ChunkScore()
>>> chunkscore.score(gold_chunked_text, chunked_text)
>>> chunkscore.precision()
1.0

>>> chunkscore.recall()
1.0

>>> chunkscore.f_measure()
1.0

>>> chunkscore.missed()
[]

>>> chunkscore.incorrect()
[]

>>> chunk_parser.rules()
[<ChunkRule: '<.*>+'>, <StripRule: '<VBD|IN|\\.>'>,
<SplitRule: '<DT><NN>', '<DT><NN>'>]

Printing parsers:

>>> print(repr(chunk_parser))
<RegexpChunkParser with 3 rules>
>>> print(chunk_parser)
RegexpChunkParser with 3 rules:
    Chunk everything
      <ChunkRule: '<.*>+'>
    Strip on verbs/prepositions
      <StripRule: '<VBD|IN|\\.>'>
    Split successive determiner/noun pairs
      <SplitRule: '<DT><NN>', '<DT><NN>'>

Regression Tests
~~~~~~~~~~~~~~~~
ChunkParserI
------------
`ChunkParserI` is an abstract interface -- it is not meant to be
instantiated directly.

>>> ChunkParserI().parse([])
Traceback (most recent call last):
  . . .
NotImplementedError


ChunkString
-----------
ChunkString can be built from a tree of tagged tuples, a tree of
trees, or a mixed list of both:

>>> t1 = Tree('S', [('w%d' % i, 't%d' % i) for i in range(10)])
>>> t2 = Tree('S', [Tree('t0', []), Tree('t1', ['c1'])])
>>> t3 = Tree('S', [('w0', 't0'), Tree('t1', ['c1'])])
>>> ChunkString(t1)
<ChunkString: '<t0><t1><t2><t3><t4><t5><t6><t7><t8><t9>'>
>>> ChunkString(t2)
<ChunkString: '<t0><t1>'>
>>> ChunkString(t3)
<ChunkString: '<t0><t1>'>

Other values generate an error:

>>> ChunkString(Tree('S', ['x']))
Traceback (most recent call last):
  . . .
ValueError: chunk structures must contain tagged tokens or trees

The `str()` for a chunk string adds spaces to it, which makes it line
up with `str()` output for other chunk strings over the same
underlying input.

>>> cs = ChunkString(t1)
>>> print(cs)
<t0> <t1> <t2> <t3> <t4> <t5> <t6> <t7> <t8> <t9>
>>> cs.xform('<t3>', '{<t3>}')
>>> print(cs)
<t0> <t1> <t2> {<t3>} <t4> <t5> <t6> <t7> <t8> <t9>

The `_verify()` method makes sure that our transforms don't corrupt
the chunk string. By setting debug_level=2, `_verify()` will be
called at the end of every call to `xform`.

>>> cs = ChunkString(t1, debug_level=3)

>>> # tag not marked with <...>:
>>> cs.xform('<t3>', 't3')
Traceback (most recent call last):
  . . .
ValueError: Transformation generated invalid chunkstring:
  <t0><t1><t2>t3<t4><t5><t6><t7><t8><t9>

>>> # brackets not balanced:
>>> cs.xform('<t3>', '{<t3>')
Traceback (most recent call last):
  . . .
ValueError: Transformation generated invalid chunkstring:
  <t0><t1><t2>{<t3><t4><t5><t6><t7><t8><t9>

>>> # nested brackets:
>>> cs.xform('<t3><t4><t5>', '{<t3>{<t4>}<t5>}')
Traceback (most recent call last):
  . . .
ValueError: Transformation generated invalid chunkstring:
  <t0><t1><t2>{<t3>{<t4>}<t5>}<t6><t7><t8><t9>

>>> # modified tags:
>>> cs.xform('<t3>', '<t9>')
Traceback (most recent call last):
  . . .
ValueError: Transformation generated invalid chunkstring: tag changed

>>> # added tags:
>>> cs.xform('<t9>', '<t9><t10>')
Traceback (most recent call last):
  . . .
ValueError: Transformation generated invalid chunkstring: tag changed

Chunking Rules
--------------

Test the different rule constructors & __repr__ methods:

>>> r1 = RegexpChunkRule('<a|b>'+ChunkString.IN_STRIP_PATTERN,
...                      '{<a|b>}', 'chunk <a> and <b>')
>>> r2 = RegexpChunkRule(re.compile('<a|b>'+ChunkString.IN_STRIP_PATTERN),
...                      '{<a|b>}', 'chunk <a> and <b>')
>>> r3 = ChunkRule('<a|b>', 'chunk <a> and <b>')
>>> r4 = StripRule('<a|b>', 'strip <a> and <b>')
>>> r5 = UnChunkRule('<a|b>', 'unchunk <a> and <b>')
>>> r6 = MergeRule('<a>', '<b>', 'merge <a> w/ <b>')
>>> r7 = SplitRule('<a>', '<b>', 'split <a> from <b>')
>>> r8 = ExpandLeftRule('<a>', '<b>', 'expand left <a> <b>')
>>> r9 = ExpandRightRule('<a>', '<b>', 'expand right <a> <b>')
>>> for rule in r1, r2, r3, r4, r5, r6, r7, r8, r9:
...     print(rule)
<RegexpChunkRule: '<a|b>(?=[^\\}]*(\\{|$))'->'{<a|b>}'>
<RegexpChunkRule: '<a|b>(?=[^\\}]*(\\{|$))'->'{<a|b>}'>
<ChunkRule: '<a|b>'>
<StripRule: '<a|b>'>
<UnChunkRule: '<a|b>'>
<MergeRule: '<a>', '<b>'>
<SplitRule: '<a>', '<b>'>
<ExpandLeftRule: '<a>', '<b>'>
<ExpandRightRule: '<a>', '<b>'>

`tag_pattern2re_pattern()` complains if the tag pattern looks problematic:

>>> tag_pattern2re_pattern('{}')
Traceback (most recent call last):
  . . .
ValueError: Bad tag pattern: '{}'

RegexpChunkParser
-----------------

A warning is printed when parsing an empty sentence:

>>> parser = RegexpChunkParser([ChunkRule('<a>', '')])
>>> parser.parse(Tree('S', []))
Warning: parsing empty text
Tree('S', [])

RegexpParser
------------

>>> parser = RegexpParser('''
... NP: {<DT>? <JJ>* <NN>*} # NP
... P: {<IN>} # Preposition
... V: {<V.*>} # Verb
... PP: {<P> <NP>} # PP -> P NP
... VP: {<V> <NP|PP>*} # VP -> V (NP|PP)*
... ''')
>>> print(repr(parser))
<chunk.RegexpParser with 5 stages>
>>> print(parser)
chunk.RegexpParser with 5 stages:
    RegexpChunkParser with 1 rules:
        NP <ChunkRule: '<DT>? <JJ>* <NN>*'>
    RegexpChunkParser with 1 rules:
        Preposition <ChunkRule: '<IN>'>
    RegexpChunkParser with 1 rules:
        Verb <ChunkRule: '<V.*>'>
    RegexpChunkParser with 1 rules:
        PP -> P NP <ChunkRule: '<P> <NP>'>
    RegexpChunkParser with 1 rules:
        VP -> V (NP|PP)* <ChunkRule: '<V> <NP|PP>*'>
>>> print(parser.parse(unchunked_text, trace=True))
# Input:
<DT> <NN> <VBD> <IN> <DT> <NN> <DT> <NN> <VBD> <.>
# NP:
{<DT> <NN>} <VBD> <IN> {<DT> <NN>}{<DT> <NN>} <VBD> <.>
# Input:
<NP> <VBD> <IN> <NP> <NP> <VBD> <.>
# Preposition:
<NP> <VBD> {<IN>} <NP> <NP> <VBD> <.>
# Input:
<NP> <VBD> <P> <NP> <NP> <VBD> <.>
# Verb:
<NP> {<VBD>} <P> <NP> <NP> {<VBD>} <.>
# Input:
<NP> <V> <P> <NP> <NP> <V> <.>
# PP -> P NP:
<NP> <V> {<P> <NP>} <NP> <V> <.>
# Input:
<NP> <V> <PP> <NP> <V> <.>
# VP -> V (NP|PP)*:
<NP> {<V> <PP> <NP>}{<V>} <.>
(S
  (NP The/DT cat/NN)
  (VP
    (V sat/VBD)
    (PP (P on/IN) (NP the/DT mat/NN))
    (NP the/DT dog/NN))
  (VP (V chewed/VBD))
  ./.)

Test parsing of other rule types:

>>> print(RegexpParser('''
... X:
...     }<a><b>{ # strip rule
...     <a>}{<b> # split rule
...     <a>{}<b> # merge rule
...     <a>{<b>}<c> # chunk rule w/ context
... '''))
chunk.RegexpParser with 1 stages:
    RegexpChunkParser with 4 rules:
        strip rule <StripRule: '<a><b>'>
        split rule <SplitRule: '<a>', '<b>'>
        merge rule <MergeRule: '<a>', '<b>'>
        chunk rule w/ context <ChunkRuleWithContext: '<a>', '<b>', '<c>'>

Illegal patterns give an error message:

>>> print(RegexpParser('X: {<foo>} {<bar>}'))
Traceback (most recent call last):
  . . .
ValueError: Illegal chunk pattern: {<foo>} {<bar>}
env-llmeval/lib/python3.10/site-packages/nltk/test/collections.doctest
ADDED
@@ -0,0 +1,31 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

===========
Collections
===========

>>> import nltk
>>> from nltk.collections import *

Trie
----

Trie can be pickled:

>>> import pickle
>>> trie = nltk.collections.Trie(['a'])
>>> s = pickle.dumps(trie)
>>> pickle.loads(s)
{'a': {True: None}}
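The pickled value above also shows the layout of a `Trie`: nested dicts keyed by characters, with the key `True` marking the end of an inserted string. A small lookup sketch over that structure (the `trie_contains` helper is illustrative, not an NLTK API)::

    import nltk

    def trie_contains(trie, word):
        # Walk the nested-dict structure one character at a time.
        node = trie
        for ch in word:
            if ch not in node:
                return False
            node = node[ch]
        return True in node  # the True key marks a complete entry

    t = nltk.collections.Trie(['cat', 'car'])
    print(trie_contains(t, 'cat'), trie_contains(t, 'ca'))  # True False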

LazyIteratorList
----------------

Fetching the length of a LazyIteratorList object does not raise a StopIteration exception:

>>> lil = LazyIteratorList(i for i in range(1, 11))
>>> lil[-1]
10
>>> len(lil)
10
env-llmeval/lib/python3.10/site-packages/nltk/test/conftest.py
ADDED
@@ -0,0 +1,33 @@
import pytest

from nltk.corpus.reader import CorpusReader


@pytest.fixture(autouse=True)
def mock_plot(mocker):
    """Disable matplotlib plotting in test code"""

    try:
        import matplotlib.pyplot as plt

        mocker.patch.object(plt, "gca")
        mocker.patch.object(plt, "show")
    except ImportError:
        pass


@pytest.fixture(scope="module", autouse=True)
def teardown_loaded_corpora():
    """
    After each test session ends (either doctest or unit test),
    unload any loaded corpora
    """

    yield  # first, wait for the test to end

    import nltk.corpus

    for name in dir(nltk.corpus):
        obj = getattr(nltk.corpus, name, None)
        if isinstance(obj, CorpusReader) and hasattr(obj, "_unload"):
            obj._unload()
env-llmeval/lib/python3.10/site-packages/nltk/test/corpus.doctest
ADDED
The diff for this file is too large to render.
See raw diff
env-llmeval/lib/python3.10/site-packages/nltk/test/crubadan.doctest
ADDED
@@ -0,0 +1,65 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

Crubadan Corpus Reader
======================

Crubadan is an NLTK corpus reader for ngram files provided
by the Crubadan project. It supports several languages.

>>> from nltk.corpus import crubadan
>>> crubadan.langs()
['abk', 'abn',..., 'zpa', 'zul']

----------------------------------------
Language code mapping and helper methods
----------------------------------------

The web crawler that generates the 3-gram frequencies works at the
level of "writing systems" rather than languages. Writing systems
are assigned internal 2-3 letter codes that require mapping to the
standard ISO 639-3 codes. For more information, please refer to
the README in the nltk_data/crubadan folder after installing it.

To translate ISO 639-3 codes to "Crubadan Code":

>>> crubadan.iso_to_crubadan('eng')
'en'
>>> crubadan.iso_to_crubadan('fra')
'fr'
>>> crubadan.iso_to_crubadan('aaa')

In reverse, print the ISO 639-3 code if we have the Crubadan Code:

>>> crubadan.crubadan_to_iso('en')
'eng'
>>> crubadan.crubadan_to_iso('fr')
'fra'
>>> crubadan.crubadan_to_iso('aa')

---------------------------
Accessing ngram frequencies
---------------------------

On initialization, the reader will create a dictionary of every
language supported by the Crubadan project, mapping the ISO 639-3
language code to its corresponding ngram frequency.

You can access an individual language's FreqDist and the ngrams within it as follows:

>>> english_fd = crubadan.lang_freq('eng')
>>> english_fd['the']
728135

The above accesses the FreqDist for English and returns the frequency of the ngram 'the'.
An ngram that isn't found within the language will return 0:

>>> english_fd['sometest']
0
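Since unknown ngrams simply score 0, the per-language FreqDists can be combined into a very crude language guesser. The sketch below is only an illustration built on the calls shown in this file; `guess_language` and its scoring scheme are not part of NLTK::

    from nltk.corpus import crubadan
    from nltk.util import trigrams

    def guess_language(text, candidates=('eng', 'fra')):
        # Score each candidate by the summed frequency of the text's character 3-grams.
        grams = ["".join(g) for g in trigrams(text.lower())]
        scores = {lang: sum(crubadan.lang_freq(lang)[g] for g in grams)
                  for lang in candidates}
        return max(scores, key=scores.get)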

A language that isn't supported will raise an exception:

>>> crubadan.lang_freq('elvish')
Traceback (most recent call last):
...
RuntimeError: Unsupported language.
env-llmeval/lib/python3.10/site-packages/nltk/test/dependency.doctest
ADDED
@@ -0,0 +1,241 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

===================
Dependency Grammars
===================

>>> from nltk.grammar import DependencyGrammar
>>> from nltk.parse import (
...     DependencyGraph,
...     ProjectiveDependencyParser,
...     NonprojectiveDependencyParser,
... )

CoNLL Data
----------

>>> treebank_data = """Pierre NNP 2 NMOD
... Vinken NNP 8 SUB
... , , 2 P
... 61 CD 5 NMOD
... years NNS 6 AMOD
... old JJ 2 NMOD
... , , 2 P
... will MD 0 ROOT
... join VB 8 VC
... the DT 11 NMOD
... board NN 9 OBJ
... as IN 9 VMOD
... a DT 15 NMOD
... nonexecutive JJ 15 NMOD
... director NN 12 PMOD
... Nov. NNP 9 VMOD
... 29 CD 16 NMOD
... . . 9 VMOD
... """

>>> dg = DependencyGraph(treebank_data)
>>> dg.tree().pprint()
(will
  (Vinken Pierre , (old (years 61)) ,)
  (join (board the) (as (director a nonexecutive)) (Nov. 29) .))
>>> for head, rel, dep in dg.triples():
...     print(
...         '({h[0]}, {h[1]}), {r}, ({d[0]}, {d[1]})'
...         .format(h=head, r=rel, d=dep)
...     )
(will, MD), SUB, (Vinken, NNP)
(Vinken, NNP), NMOD, (Pierre, NNP)
(Vinken, NNP), P, (,, ,)
(Vinken, NNP), NMOD, (old, JJ)
(old, JJ), AMOD, (years, NNS)
(years, NNS), NMOD, (61, CD)
(Vinken, NNP), P, (,, ,)
(will, MD), VC, (join, VB)
(join, VB), OBJ, (board, NN)
(board, NN), NMOD, (the, DT)
(join, VB), VMOD, (as, IN)
(as, IN), PMOD, (director, NN)
(director, NN), NMOD, (a, DT)
(director, NN), NMOD, (nonexecutive, JJ)
(join, VB), VMOD, (Nov., NNP)
(Nov., NNP), NMOD, (29, CD)
(join, VB), VMOD, (., .)

Using a custom cell extractor.

>>> def custom_extractor(cells):
...     _, tag, head, rel = cells
...     return 'spam', 'spam', tag, tag, '', head, rel
>>> dg = DependencyGraph(treebank_data, cell_extractor=custom_extractor)
>>> dg.tree().pprint()
(spam
  (spam spam spam (spam (spam spam)) spam)
  (spam (spam spam) (spam (spam spam spam)) (spam spam) spam))

Custom cell extractors can take in and return an index.

>>> def custom_extractor(cells, index):
...     word, tag, head, rel = cells
...     return (index, '{}-{}'.format(word, index), word,
...             tag, tag, '', head, rel)
>>> dg = DependencyGraph(treebank_data, cell_extractor=custom_extractor)
>>> dg.tree().pprint()
(will-8
  (Vinken-2 Pierre-1 ,-3 (old-6 (years-5 61-4)) ,-7)
  (join-9
    (board-11 the-10)
    (as-12 (director-15 a-13 nonexecutive-14))
    (Nov.-16 29-17)
    .-18))

Using the dependency-parsed version of the Penn Treebank corpus sample.

>>> from nltk.corpus import dependency_treebank
>>> t = dependency_treebank.parsed_sents()[0]
>>> print(t.to_conll(3))
Pierre NNP 2
Vinken NNP 8
, , 2
61 CD 5
years NNS 6
old JJ 2
, , 2
will MD 0
join VB 8
the DT 11
board NN 9
as IN 9
a DT 15
nonexecutive JJ 15
director NN 12
Nov. NNP 9
29 CD 16
. . 8

Using the output of zpar (like Malt-TAB but with zero-based indexing)

>>> zpar_data = """
... Pierre NNP 1 NMOD
... Vinken NNP 7 SUB
... , , 1 P
... 61 CD 4 NMOD
... years NNS 5 AMOD
... old JJ 1 NMOD
... , , 1 P
... will MD -1 ROOT
... join VB 7 VC
... the DT 10 NMOD
... board NN 8 OBJ
... as IN 8 VMOD
... a DT 14 NMOD
... nonexecutive JJ 14 NMOD
... director NN 11 PMOD
... Nov. NNP 8 VMOD
... 29 CD 15 NMOD
... . . 7 P
... """

>>> zdg = DependencyGraph(zpar_data, zero_based=True)
>>> print(zdg.tree())
(will
  (Vinken Pierre , (old (years 61)) ,)
  (join (board the) (as (director a nonexecutive)) (Nov. 29))
  .)


Projective Dependency Parsing
-----------------------------

>>> grammar = DependencyGrammar.fromstring("""
... 'fell' -> 'price' | 'stock'
... 'price' -> 'of' 'the'
... 'of' -> 'stock'
... 'stock' -> 'the'
... """)
>>> print(grammar)
Dependency grammar with 5 productions
  'fell' -> 'price'
  'fell' -> 'stock'
  'price' -> 'of' 'the'
  'of' -> 'stock'
  'stock' -> 'the'

>>> dp = ProjectiveDependencyParser(grammar)
>>> for t in sorted(dp.parse(['the', 'price', 'of', 'the', 'stock', 'fell'])):
...     print(t)
(fell (price the (of (stock the))))
(fell (price the of) (stock the))
(fell (price the of the) stock)

Non-Projective Dependency Parsing
---------------------------------

>>> grammar = DependencyGrammar.fromstring("""
... 'taught' -> 'play' | 'man'
... 'man' -> 'the'
... 'play' -> 'golf' | 'dog' | 'to'
... 'dog' -> 'his'
... """)
>>> print(grammar)
Dependency grammar with 7 productions
  'taught' -> 'play'
  'taught' -> 'man'
  'man' -> 'the'
  'play' -> 'golf'
  'play' -> 'dog'
  'play' -> 'to'
  'dog' -> 'his'

>>> dp = NonprojectiveDependencyParser(grammar)
>>> g, = dp.parse(['the', 'man', 'taught', 'his', 'dog', 'to', 'play', 'golf'])

>>> print(g.root['word'])
taught

>>> for _, node in sorted(g.nodes.items()):
...     if node['word'] is not None:
...         print('{address} {word}: {d}'.format(d=node['deps'][''], **node))
1 the: []
2 man: [1]
3 taught: [2, 7]
4 his: []
5 dog: [4]
6 to: []
7 play: [5, 6, 8]
8 golf: []

>>> print(g.tree())
(taught (man the) (play (dog his) to golf))

Integration with MALT parser
============================

If the top relation is different from the default, we can set it. In the case
of the MALT parser, it is `'null'`.

>>> dg_str = """1 I _ NN NN _ 2 nn _ _
... 2 shot _ NN NN _ 0 null _ _
... 3 an _ AT AT _ 2 dep _ _
... 4 elephant _ NN NN _ 7 nn _ _
... 5 in _ NN NN _ 7 nn _ _
... 6 my _ NN NN _ 7 nn _ _
... 7 pajamas _ NNS NNS _ 3 dobj _ _
... """
>>> dg = DependencyGraph(dg_str, top_relation_label='null')

>>> len(dg.nodes)
8

>>> dg.root['word'], dg.root['address']
('shot', 2)

>>> print(dg.to_conll(10))
1 I _ NN NN _ 2 nn _ _
2 shot _ NN NN _ 0 null _ _
3 an _ AT AT _ 2 dep _ _
4 elephant _ NN NN _ 7 nn _ _
5 in _ NN NN _ 7 nn _ _
6 my _ NN NN _ 7 nn _ _
7 pajamas _ NNS NNS _ 3 dobj _ _
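As a sanity check (a sketch, not part of the doctest), the CoNLL text produced by `to_conll(10)` can be fed back into `DependencyGraph` with the same `top_relation_label` to rebuild an equivalent graph::

    dg2 = DependencyGraph(dg.to_conll(10), top_relation_label='null')
    assert dg2.root['word'] == dg.root['word']  # both roots should be 'shot'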