applied-ai-018 committed on
Commit bfddfae · verified · 1 Parent(s): 738b6e4

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/nltk/inference/__init__.py +24 -0
  2. llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/api.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/discourse.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/mace.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/nonmonotonic.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/prover9.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/resolution.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/nltk/inference/api.py +614 -0
  10. llmeval-env/lib/python3.10/site-packages/nltk/inference/discourse.py +651 -0
  11. llmeval-env/lib/python3.10/site-packages/nltk/inference/mace.py +383 -0
  12. llmeval-env/lib/python3.10/site-packages/nltk/inference/nonmonotonic.py +561 -0
  13. llmeval-env/lib/python3.10/site-packages/nltk/inference/prover9.py +508 -0
  14. llmeval-env/lib/python3.10/site-packages/nltk/inference/resolution.py +759 -0
  15. llmeval-env/lib/python3.10/site-packages/nltk/inference/tableau.py +712 -0
  16. llmeval-env/lib/python3.10/site-packages/nltk/metrics/__init__.py +51 -0
  17. llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/__init__.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/confusionmatrix.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/distance.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/nltk/metrics/agreement.py +465 -0
  21. llmeval-env/lib/python3.10/site-packages/nltk/metrics/aline.py +1354 -0
  22. llmeval-env/lib/python3.10/site-packages/nltk/metrics/association.py +476 -0
  23. llmeval-env/lib/python3.10/site-packages/nltk/metrics/confusionmatrix.py +353 -0
  24. llmeval-env/lib/python3.10/site-packages/nltk/metrics/distance.py +508 -0
  25. llmeval-env/lib/python3.10/site-packages/nltk/metrics/paice.py +389 -0
  26. llmeval-env/lib/python3.10/site-packages/nltk/metrics/scores.py +228 -0
  27. llmeval-env/lib/python3.10/site-packages/nltk/metrics/segmentation.py +222 -0
  28. llmeval-env/lib/python3.10/site-packages/nltk/metrics/spearman.py +68 -0
  29. llmeval-env/lib/python3.10/site-packages/nltk/misc/__init__.py +11 -0
  30. llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/__init__.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/babelfish.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/chomsky.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/minimalset.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/sort.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/wordfinder.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/nltk/misc/sort.py +176 -0
  37. llmeval-env/lib/python3.10/site-packages/nltk/misc/wordfinder.py +139 -0
  38. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__init__.py +0 -0
  39. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/__init__.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_counter.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_models.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_preprocessing.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_vocabulary.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_counter.py +116 -0
  45. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_models.py +610 -0
  46. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_preprocessing.py +30 -0
  47. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_vocabulary.py +156 -0
  48. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__init__.py +0 -0
  49. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/__init__.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_bleu.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/nltk/inference/__init__.py ADDED
@@ -0,0 +1,24 @@
+ # Natural Language Toolkit: Inference
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Dan Garrette <[email protected]>
+ #         Ewan Klein <[email protected]>
+ #
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+
+ """
+ Classes and interfaces for theorem proving and model building.
+ """
+
+ from nltk.inference.api import ParallelProverBuilder, ParallelProverBuilderCommand
+ from nltk.inference.discourse import (
+     CfgReadingCommand,
+     DiscourseTester,
+     DrtGlueReadingCommand,
+     ReadingCommand,
+ )
+ from nltk.inference.mace import Mace, MaceCommand
+ from nltk.inference.prover9 import Prover9, Prover9Command
+ from nltk.inference.resolution import ResolutionProver, ResolutionProverCommand
+ from nltk.inference.tableau import TableauProver, TableauProverCommand
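The package ``__init__`` above exposes the provers and model builders as a single public API. For orientation, a minimal, hypothetical usage sketch of that API (it assumes NLTK is importable from this environment; the pure-Python ``ResolutionProver`` is used so no external binary is needed):

    from nltk.sem import Expression
    from nltk.inference import ResolutionProver

    # Parse a goal and some assumptions into logic expressions
    goal = Expression.fromstring("mortal(socrates)")
    assumptions = [
        Expression.fromstring("man(socrates)"),
        Expression.fromstring("all x.(man(x) -> mortal(x))"),
    ]

    # Prover.prove() returns a bool indicating whether the goal follows
    print(ResolutionProver().prove(goal, assumptions))  # True

The Prover9 and Mace4 wrappers imported alongside it follow the same ``prove()`` / ``build_model()`` interface but shell out to the external binaries.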
llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (860 Bytes).
 
llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/api.cpython-310.pyc ADDED
Binary file (19.1 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/discourse.cpython-310.pyc ADDED
Binary file (20.8 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/mace.cpython-310.pyc ADDED
Binary file (10.6 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/nonmonotonic.cpython-310.pyc ADDED
Binary file (16.4 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/prover9.cpython-310.pyc ADDED
Binary file (12.9 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/resolution.cpython-310.pyc ADDED
Binary file (21.4 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/inference/api.py ADDED
@@ -0,0 +1,614 @@
1
+ # Natural Language Toolkit: Classifier Interface
2
+ #
3
+ # Author: Ewan Klein <[email protected]>
4
+ # Dan Garrette <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Interfaces and base classes for theorem provers and model builders.
11
+
12
+ ``Prover`` is a standard interface for a theorem prover which tries to prove a goal from a
13
+ list of assumptions.
14
+
15
+ ``ModelBuilder`` is a standard interface for a model builder. Given just a set of assumptions,
16
+ the model builder tries to build a model for the assumptions. Given a set of assumptions and a
17
+ goal *G*, the model builder tries to find a counter-model, in the sense of a model that will satisfy
18
+ the assumptions plus the negation of *G*.
19
+ """
20
+
21
+ import threading
22
+ import time
23
+ from abc import ABCMeta, abstractmethod
24
+
25
+
26
+ class Prover(metaclass=ABCMeta):
27
+ """
28
+ Interface for trying to prove a goal from assumptions. Both the goal and
29
+ the assumptions are constrained to be formulas of ``logic.Expression``.
30
+ """
31
+
32
+ def prove(self, goal=None, assumptions=None, verbose=False):
33
+ """
34
+ :return: Whether the proof was successful or not.
35
+ :rtype: bool
36
+ """
37
+ return self._prove(goal, assumptions, verbose)[0]
38
+
39
+ @abstractmethod
40
+ def _prove(self, goal=None, assumptions=None, verbose=False):
41
+ """
42
+ :return: Whether the proof was successful or not, along with the proof
43
+ :rtype: tuple: (bool, str)
44
+ """
45
+
46
+
47
+ class ModelBuilder(metaclass=ABCMeta):
48
+ """
49
+ Interface for trying to build a model of a set of formulas.
50
+ Open formulas are assumed to be universally quantified.
51
+ Both the goal and the assumptions are constrained to be formulas
52
+ of ``logic.Expression``.
53
+ """
54
+
55
+ def build_model(self, goal=None, assumptions=None, verbose=False):
56
+ """
57
+ Perform the actual model building.
58
+ :return: Whether a model was generated
59
+ :rtype: bool
60
+ """
61
+ return self._build_model(goal, assumptions, verbose)[0]
62
+
63
+ @abstractmethod
64
+ def _build_model(self, goal=None, assumptions=None, verbose=False):
65
+ """
66
+ Perform the actual model building.
67
+ :return: Whether a model was generated, and the model itself
68
+ :rtype: tuple(bool, sem.Valuation)
69
+ """
70
+
71
+
72
+ class TheoremToolCommand(metaclass=ABCMeta):
73
+ """
74
+ This class holds a goal and a list of assumptions to be used in proving
75
+ or model building.
76
+ """
77
+
78
+ @abstractmethod
79
+ def add_assumptions(self, new_assumptions):
80
+ """
81
+ Add new assumptions to the assumption list.
82
+
83
+ :param new_assumptions: new assumptions
84
+ :type new_assumptions: list(sem.Expression)
85
+ """
86
+
87
+ @abstractmethod
88
+ def retract_assumptions(self, retracted, debug=False):
89
+ """
90
+ Retract assumptions from the assumption list.
91
+
92
+ :param debug: If True, give warning when ``retracted`` is not present on
93
+ assumptions list.
94
+ :type debug: bool
95
+ :param retracted: assumptions to be retracted
96
+ :type retracted: list(sem.Expression)
97
+ """
98
+
99
+ @abstractmethod
100
+ def assumptions(self):
101
+ """
102
+ List the current assumptions.
103
+
104
+ :return: list of ``Expression``
105
+ """
106
+
107
+ @abstractmethod
108
+ def goal(self):
109
+ """
110
+ Return the goal
111
+
112
+ :return: ``Expression``
113
+ """
114
+
115
+ @abstractmethod
116
+ def print_assumptions(self):
117
+ """
118
+ Print the list of the current assumptions.
119
+ """
120
+
121
+
122
+ class ProverCommand(TheoremToolCommand):
123
+ """
124
+ This class holds a ``Prover``, a goal, and a list of assumptions. When
125
+ prove() is called, the ``Prover`` is executed with the goal and assumptions.
126
+ """
127
+
128
+ @abstractmethod
129
+ def prove(self, verbose=False):
130
+ """
131
+ Perform the actual proof.
132
+ """
133
+
134
+ @abstractmethod
135
+ def proof(self, simplify=True):
136
+ """
137
+ Return the proof string
138
+ :param simplify: bool simplify the proof?
139
+ :return: str
140
+ """
141
+
142
+ @abstractmethod
143
+ def get_prover(self):
144
+ """
145
+ Return the prover object
146
+ :return: ``Prover``
147
+ """
148
+
149
+
150
+ class ModelBuilderCommand(TheoremToolCommand):
151
+ """
152
+ This class holds a ``ModelBuilder``, a goal, and a list of assumptions.
153
+ When build_model() is called, the ``ModelBuilder`` is executed with the goal
154
+ and assumptions.
155
+ """
156
+
157
+ @abstractmethod
158
+ def build_model(self, verbose=False):
159
+ """
160
+ Perform the actual model building.
161
+ :return: A model if one is generated; None otherwise.
162
+ :rtype: sem.Valuation
163
+ """
164
+
165
+ @abstractmethod
166
+ def model(self, format=None):
167
+ """
168
+ Return a string representation of the model
169
+
170
+ :param format: str indicating the format for displaying the model
171
+ :return: str
172
+ """
173
+
174
+ @abstractmethod
175
+ def get_model_builder(self):
176
+ """
177
+ Return the model builder object
178
+ :return: ``ModelBuilder``
179
+ """
180
+
181
+
182
+ class BaseTheoremToolCommand(TheoremToolCommand):
183
+ """
184
+ This class holds a goal and a list of assumptions to be used in proving
185
+ or model building.
186
+ """
187
+
188
+ def __init__(self, goal=None, assumptions=None):
189
+ """
190
+ :param goal: Input expression to prove
191
+ :type goal: sem.Expression
192
+ :param assumptions: Input expressions to use as assumptions in
193
+ the proof.
194
+ :type assumptions: list(sem.Expression)
195
+ """
196
+ self._goal = goal
197
+
198
+ if not assumptions:
199
+ self._assumptions = []
200
+ else:
201
+ self._assumptions = list(assumptions)
202
+
203
+ self._result = None
204
+ """A holder for the result, to prevent unnecessary re-proving"""
205
+
206
+ def add_assumptions(self, new_assumptions):
207
+ """
208
+ Add new assumptions to the assumption list.
209
+
210
+ :param new_assumptions: new assumptions
211
+ :type new_assumptions: list(sem.Expression)
212
+ """
213
+ self._assumptions.extend(new_assumptions)
214
+ self._result = None
215
+
216
+ def retract_assumptions(self, retracted, debug=False):
217
+ """
218
+ Retract assumptions from the assumption list.
219
+
220
+ :param debug: If True, give warning when ``retracted`` is not present on
221
+ assumptions list.
222
+ :type debug: bool
223
+ :param retracted: assumptions to be retracted
224
+ :type retracted: list(sem.Expression)
225
+ """
226
+ retracted = set(retracted)
227
+ result_list = list(filter(lambda a: a not in retracted, self._assumptions))
228
+ if debug and result_list == self._assumptions:
229
+ print(Warning("Assumptions list has not been changed:"))
230
+ self.print_assumptions()
231
+
232
+ self._assumptions = result_list
233
+
234
+ self._result = None
235
+
236
+ def assumptions(self):
237
+ """
238
+ List the current assumptions.
239
+
240
+ :return: list of ``Expression``
241
+ """
242
+ return self._assumptions
243
+
244
+ def goal(self):
245
+ """
246
+ Return the goal
247
+
248
+ :return: ``Expression``
249
+ """
250
+ return self._goal
251
+
252
+ def print_assumptions(self):
253
+ """
254
+ Print the list of the current assumptions.
255
+ """
256
+ for a in self.assumptions():
257
+ print(a)
258
+
259
+
260
+ class BaseProverCommand(BaseTheoremToolCommand, ProverCommand):
261
+ """
262
+ This class holds a ``Prover``, a goal, and a list of assumptions. When
263
+ prove() is called, the ``Prover`` is executed with the goal and assumptions.
264
+ """
265
+
266
+ def __init__(self, prover, goal=None, assumptions=None):
267
+ """
268
+ :param prover: The theorem tool to execute with the assumptions
269
+ :type prover: Prover
270
+ :see: ``BaseTheoremToolCommand``
271
+ """
272
+ self._prover = prover
273
+ """The theorem tool to execute with the assumptions"""
274
+
275
+ BaseTheoremToolCommand.__init__(self, goal, assumptions)
276
+
277
+ self._proof = None
278
+
279
+ def prove(self, verbose=False):
280
+ """
281
+ Perform the actual proof. Store the result to prevent unnecessary
282
+ re-proving.
283
+ """
284
+ if self._result is None:
285
+ self._result, self._proof = self._prover._prove(
286
+ self.goal(), self.assumptions(), verbose
287
+ )
288
+ return self._result
289
+
290
+ def proof(self, simplify=True):
291
+ """
292
+ Return the proof string
293
+ :param simplify: bool simplify the proof?
294
+ :return: str
295
+ """
296
+ if self._result is None:
297
+ raise LookupError("You have to call prove() first to get a proof!")
298
+ else:
299
+ return self.decorate_proof(self._proof, simplify)
300
+
301
+ def decorate_proof(self, proof_string, simplify=True):
302
+ """
303
+ Modify and return the proof string
304
+ :param proof_string: str the proof to decorate
305
+ :param simplify: bool simplify the proof?
306
+ :return: str
307
+ """
308
+ return proof_string
309
+
310
+ def get_prover(self):
311
+ return self._prover
312
+
313
+
314
+ class BaseModelBuilderCommand(BaseTheoremToolCommand, ModelBuilderCommand):
315
+ """
316
+ This class holds a ``ModelBuilder``, a goal, and a list of assumptions. When
317
+ build_model() is called, the ``ModelBuilder`` is executed with the goal and
318
+ assumptions.
319
+ """
320
+
321
+ def __init__(self, modelbuilder, goal=None, assumptions=None):
322
+ """
323
+ :param modelbuilder: The theorem tool to execute with the assumptions
324
+ :type modelbuilder: ModelBuilder
325
+ :see: ``BaseTheoremToolCommand``
326
+ """
327
+ self._modelbuilder = modelbuilder
328
+ """The theorem tool to execute with the assumptions"""
329
+
330
+ BaseTheoremToolCommand.__init__(self, goal, assumptions)
331
+
332
+ self._model = None
333
+
334
+ def build_model(self, verbose=False):
335
+ """
336
+ Attempt to build a model. Store the result to prevent unnecessary
337
+ re-building.
338
+ """
339
+ if self._result is None:
340
+ self._result, self._model = self._modelbuilder._build_model(
341
+ self.goal(), self.assumptions(), verbose
342
+ )
343
+ return self._result
344
+
345
+ def model(self, format=None):
346
+ """
347
+ Return a string representation of the model
348
+
349
+ :param format: str indicating the format for displaying the model
350
+ :return: str
351
+ """
352
+ if self._result is None:
353
+ raise LookupError("You have to call build_model() first to " "get a model!")
354
+ else:
355
+ return self._decorate_model(self._model, format)
356
+
357
+ def _decorate_model(self, valuation_str, format=None):
358
+ """
359
+ :param valuation_str: str with the model builder's output
360
+ :param format: str indicating the format for displaying
361
+ :return: str
362
+ """
363
+ return valuation_str
364
+
365
+ def get_model_builder(self):
366
+ return self._modelbuilder
367
+
368
+
369
+ class TheoremToolCommandDecorator(TheoremToolCommand):
370
+ """
371
+ A base decorator for the ``ProverCommandDecorator`` and
372
+ ``ModelBuilderCommandDecorator`` classes from which decorators can extend.
373
+ """
374
+
375
+ def __init__(self, command):
376
+ """
377
+ :param command: ``TheoremToolCommand`` to decorate
378
+ """
379
+ self._command = command
380
+
381
+ # The decorator has its own versions of 'result' different from the
382
+ # underlying command
383
+ self._result = None
384
+
385
+ def assumptions(self):
386
+ return self._command.assumptions()
387
+
388
+ def goal(self):
389
+ return self._command.goal()
390
+
391
+ def add_assumptions(self, new_assumptions):
392
+ self._command.add_assumptions(new_assumptions)
393
+ self._result = None
394
+
395
+ def retract_assumptions(self, retracted, debug=False):
396
+ self._command.retract_assumptions(retracted, debug)
397
+ self._result = None
398
+
399
+ def print_assumptions(self):
400
+ self._command.print_assumptions()
401
+
402
+
403
+ class ProverCommandDecorator(TheoremToolCommandDecorator, ProverCommand):
404
+ """
405
+ A base decorator for the ``ProverCommand`` class from which other
406
+ prover command decorators can extend.
407
+ """
408
+
409
+ def __init__(self, proverCommand):
410
+ """
411
+ :param proverCommand: ``ProverCommand`` to decorate
412
+ """
413
+ TheoremToolCommandDecorator.__init__(self, proverCommand)
414
+
415
+ # The decorator has its own versions of 'result' and 'proof'
416
+ # because they may be different from the underlying command
417
+ self._proof = None
418
+
419
+ def prove(self, verbose=False):
420
+ if self._result is None:
421
+ prover = self.get_prover()
422
+ self._result, self._proof = prover._prove(
423
+ self.goal(), self.assumptions(), verbose
424
+ )
425
+ return self._result
426
+
427
+ def proof(self, simplify=True):
428
+ """
429
+ Return the proof string
430
+ :param simplify: bool simplify the proof?
431
+ :return: str
432
+ """
433
+ if self._result is None:
434
+ raise LookupError("You have to call prove() first to get a proof!")
435
+ else:
436
+ return self.decorate_proof(self._proof, simplify)
437
+
438
+ def decorate_proof(self, proof_string, simplify=True):
439
+ """
440
+ Modify and return the proof string
441
+ :param proof_string: str the proof to decorate
442
+ :param simplify: bool simplify the proof?
443
+ :return: str
444
+ """
445
+ return self._command.decorate_proof(proof_string, simplify)
446
+
447
+ def get_prover(self):
448
+ return self._command.get_prover()
449
+
450
+
451
+ class ModelBuilderCommandDecorator(TheoremToolCommandDecorator, ModelBuilderCommand):
452
+ """
453
+ A base decorator for the ``ModelBuilderCommand`` class from which other
454
+ prover command decorators can extend.
455
+ """
456
+
457
+ def __init__(self, modelBuilderCommand):
458
+ """
459
+ :param modelBuilderCommand: ``ModelBuilderCommand`` to decorate
460
+ """
461
+ TheoremToolCommandDecorator.__init__(self, modelBuilderCommand)
462
+
463
+ # The decorator has its own versions of 'result' and 'valuation'
464
+ # because they may be different from the underlying command
465
+ self._model = None
466
+
467
+ def build_model(self, verbose=False):
468
+ """
469
+ Attempt to build a model. Store the result to prevent unnecessary
470
+ re-building.
471
+ """
472
+ if self._result is None:
473
+ modelbuilder = self.get_model_builder()
474
+ self._result, self._model = modelbuilder._build_model(
475
+ self.goal(), self.assumptions(), verbose
476
+ )
477
+ return self._result
478
+
479
+ def model(self, format=None):
480
+ """
481
+ Return a string representation of the model
482
+
483
+ :param format: str indicating the format for displaying the model
484
+ :return: str
485
+ """
486
+ if self._result is None:
487
+ raise LookupError("You have to call build_model() first to " "get a model!")
488
+ else:
489
+ return self._decorate_model(self._model, format)
490
+
491
+ def _decorate_model(self, valuation_str, format=None):
492
+ """
493
+ Modify and return the model string
494
+ :param valuation_str: str with the model builder's output
495
+ :param format: str indicating the format for displaying
496
+ :return: str
497
+ """
498
+ return self._command._decorate_model(valuation_str, format)
499
+
500
+ def get_model_builder(self):
501
+ return self._command.get_model_builder()
502
+
503
+
504
+ class ParallelProverBuilder(Prover, ModelBuilder):
505
+ """
506
+ This class stores both a prover and a model builder and when either
507
+ prove() or build_model() is called, then both theorem tools are run in
508
+ parallel. Whichever finishes first, the prover or the model builder, is the
509
+ result that will be used.
510
+ """
511
+
512
+ def __init__(self, prover, modelbuilder):
513
+ self._prover = prover
514
+ self._modelbuilder = modelbuilder
515
+
516
+ def _prove(self, goal=None, assumptions=None, verbose=False):
517
+ return self._run(goal, assumptions, verbose), ""
518
+
519
+ def _build_model(self, goal=None, assumptions=None, verbose=False):
520
+ return not self._run(goal, assumptions, verbose), ""
521
+
522
+ def _run(self, goal, assumptions, verbose):
523
+ # Set up two threads, Prover and ModelBuilder, to run in parallel
524
+ tp_thread = TheoremToolThread(
525
+ lambda: self._prover.prove(goal, assumptions, verbose), verbose, "TP"
526
+ )
527
+ mb_thread = TheoremToolThread(
528
+ lambda: self._modelbuilder.build_model(goal, assumptions, verbose),
529
+ verbose,
530
+ "MB",
531
+ )
532
+
533
+ tp_thread.start()
534
+ mb_thread.start()
535
+
536
+ while tp_thread.is_alive() and mb_thread.is_alive():
537
+ # wait until either the prover or the model builder is done
538
+ pass
539
+
540
+ if tp_thread.result is not None:
541
+ return tp_thread.result
542
+ elif mb_thread.result is not None:
543
+ return not mb_thread.result
544
+ else:
545
+ return None
546
+
547
+
548
+ class ParallelProverBuilderCommand(BaseProverCommand, BaseModelBuilderCommand):
549
+ """
550
+ This command stores both a prover and a model builder and when either
551
+ prove() or build_model() is called, then both theorem tools are run in
552
+ parallel. Whichever finishes first, the prover or the model builder, is the
553
+ result that will be used.
554
+
555
+ Because the theorem prover result is the opposite of the model builder
556
+ result, we will treat self._result as meaning "proof found/no model found".
557
+ """
558
+
559
+ def __init__(self, prover, modelbuilder, goal=None, assumptions=None):
560
+ BaseProverCommand.__init__(self, prover, goal, assumptions)
561
+ BaseModelBuilderCommand.__init__(self, modelbuilder, goal, assumptions)
562
+
563
+ def prove(self, verbose=False):
564
+ return self._run(verbose)
565
+
566
+ def build_model(self, verbose=False):
567
+ return not self._run(verbose)
568
+
569
+ def _run(self, verbose):
570
+ # Set up two threads, Prover and ModelBuilder, to run in parallel
571
+ tp_thread = TheoremToolThread(
572
+ lambda: BaseProverCommand.prove(self, verbose), verbose, "TP"
573
+ )
574
+ mb_thread = TheoremToolThread(
575
+ lambda: BaseModelBuilderCommand.build_model(self, verbose), verbose, "MB"
576
+ )
577
+
578
+ tp_thread.start()
579
+ mb_thread.start()
580
+
581
+ while tp_thread.is_alive() and mb_thread.is_alive():
582
+ # wait until either the prover or the model builder is done
583
+ pass
584
+
585
+ if tp_thread.result is not None:
586
+ self._result = tp_thread.result
587
+ elif mb_thread.result is not None:
588
+ self._result = not mb_thread.result
589
+ return self._result
590
+
591
+
592
+ class TheoremToolThread(threading.Thread):
593
+ def __init__(self, command, verbose, name=None):
594
+ threading.Thread.__init__(self)
595
+ self._command = command
596
+ self._result = None
597
+ self._verbose = verbose
598
+ self._name = name
599
+
600
+ def run(self):
601
+ try:
602
+ self._result = self._command()
603
+ if self._verbose:
604
+ print(
605
+ "Thread %s finished with result %s at %s"
606
+ % (self._name, self._result, time.localtime(time.time()))
607
+ )
608
+ except Exception as e:
609
+ print(e)
610
+ print("Thread %s completed abnormally" % (self._name))
611
+
612
+ @property
613
+ def result(self):
614
+ return self._result
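The ``api`` module above fixes the contract that the concrete tools implement: a ``Prover`` supplies ``_prove()`` returning a ``(bool, proof_string)`` pair, and a command wrapper such as ``BaseProverCommand`` caches that result so repeated calls do not re-prove. A small illustrative sketch of that contract (the ``AlwaysTrueProver`` class is hypothetical, used only to show the shape of the interface):

    from nltk.inference.api import Prover, BaseProverCommand

    class AlwaysTrueProver(Prover):
        """Toy prover: claims to prove every goal, returning the (bool, str) pair."""
        def _prove(self, goal=None, assumptions=None, verbose=False):
            return (True, "trivial proof of %s" % goal)

    cmd = BaseProverCommand(AlwaysTrueProver(), goal=None, assumptions=[])
    print(cmd.prove())  # True; the result is stored so a second call reuses it
    print(cmd.proof())  # the proof string; calling proof() before prove() raises LookupError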
llmeval-env/lib/python3.10/site-packages/nltk/inference/discourse.py ADDED
@@ -0,0 +1,651 @@
1
+ # Natural Language Toolkit: Discourse Processing
2
+ #
3
+ # Author: Ewan Klein <[email protected]>
4
+ # Dan Garrette <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ r"""
10
+ Module for incrementally developing simple discourses, and checking for semantic ambiguity,
11
+ consistency and informativeness.
12
+
13
+ Many of the ideas are based on the CURT family of programs of Blackburn and Bos
14
+ (see http://homepages.inf.ed.ac.uk/jbos/comsem/book1.html).
15
+
16
+ Consistency checking is carried out by using the ``mace`` module to call the Mace4 model builder.
17
+ Informativeness checking is carried out with a call to ``Prover.prove()`` from
18
+ the ``inference`` module.
19
+
20
+ ``DiscourseTester`` is a constructor for discourses.
21
+ The basic data structure is a list of sentences, stored as ``self._sentences``. Each sentence in the list
22
+ is assigned a "sentence ID" (``sid``) of the form ``s``\ *i*. For example::
23
+
24
+ s0: A boxer walks
25
+ s1: Every boxer chases a girl
26
+
27
+ Each sentence can be ambiguous between a number of readings, each of which receives a
28
+ "reading ID" (``rid``) of the form ``s``\ *i* -``r``\ *j*. For example::
29
+
30
+ s0 readings:
31
+
32
+ s0-r1: some x.(boxer(x) & walk(x))
33
+ s0-r0: some x.(boxerdog(x) & walk(x))
34
+
35
+ A "thread" is a list of readings, represented as a list of ``rid``\ s.
36
+ Each thread receives a "thread ID" (``tid``) of the form ``d``\ *i*.
37
+ For example::
38
+
39
+ d0: ['s0-r0', 's1-r0']
40
+
41
+ The set of all threads for a discourse is the Cartesian product of all the readings of the sequences of sentences.
42
+ (This is not intended to scale beyond very short discourses!) The method ``readings(filter=True)`` will only show
43
+ those threads which are consistent (taking into account any background assumptions).
44
+ """
45
+
46
+ import os
47
+ from abc import ABCMeta, abstractmethod
48
+ from functools import reduce
49
+ from operator import add, and_
50
+
51
+ from nltk.data import show_cfg
52
+ from nltk.inference.mace import MaceCommand
53
+ from nltk.inference.prover9 import Prover9Command
54
+ from nltk.parse import load_parser
55
+ from nltk.parse.malt import MaltParser
56
+ from nltk.sem.drt import AnaphoraResolutionException, resolve_anaphora
57
+ from nltk.sem.glue import DrtGlue
58
+ from nltk.sem.logic import Expression
59
+ from nltk.tag import RegexpTagger
60
+
61
+
62
+ class ReadingCommand(metaclass=ABCMeta):
63
+ @abstractmethod
64
+ def parse_to_readings(self, sentence):
65
+ """
66
+ :param sentence: the sentence to read
67
+ :type sentence: str
68
+ """
69
+
70
+ def process_thread(self, sentence_readings):
71
+ """
72
+ This method should be used to handle dependencies between readings such
73
+ as resolving anaphora.
74
+
75
+ :param sentence_readings: readings to process
76
+ :type sentence_readings: list(Expression)
77
+ :return: the list of readings after processing
78
+ :rtype: list(Expression)
79
+ """
80
+ return sentence_readings
81
+
82
+ @abstractmethod
83
+ def combine_readings(self, readings):
84
+ """
85
+ :param readings: readings to combine
86
+ :type readings: list(Expression)
87
+ :return: one combined reading
88
+ :rtype: Expression
89
+ """
90
+
91
+ @abstractmethod
92
+ def to_fol(self, expression):
93
+ """
94
+ Convert this expression into a First-Order Logic expression.
95
+
96
+ :param expression: an expression
97
+ :type expression: Expression
98
+ :return: a FOL version of the input expression
99
+ :rtype: Expression
100
+ """
101
+
102
+
103
+ class CfgReadingCommand(ReadingCommand):
104
+ def __init__(self, gramfile=None):
105
+ """
106
+ :param gramfile: name of file where grammar can be loaded
107
+ :type gramfile: str
108
+ """
109
+ self._gramfile = (
110
+ gramfile if gramfile else "grammars/book_grammars/discourse.fcfg"
111
+ )
112
+ self._parser = load_parser(self._gramfile)
113
+
114
+ def parse_to_readings(self, sentence):
115
+ """:see: ReadingCommand.parse_to_readings()"""
116
+ from nltk.sem import root_semrep
117
+
118
+ tokens = sentence.split()
119
+ trees = self._parser.parse(tokens)
120
+ return [root_semrep(tree) for tree in trees]
121
+
122
+ def combine_readings(self, readings):
123
+ """:see: ReadingCommand.combine_readings()"""
124
+ return reduce(and_, readings)
125
+
126
+ def to_fol(self, expression):
127
+ """:see: ReadingCommand.to_fol()"""
128
+ return expression
129
+
130
+
131
+ class DrtGlueReadingCommand(ReadingCommand):
132
+ def __init__(self, semtype_file=None, remove_duplicates=False, depparser=None):
133
+ """
134
+ :param semtype_file: name of file where grammar can be loaded
135
+ :param remove_duplicates: should duplicates be removed?
136
+ :param depparser: the dependency parser
137
+ """
138
+ if semtype_file is None:
139
+ semtype_file = os.path.join(
140
+ "grammars", "sample_grammars", "drt_glue.semtype"
141
+ )
142
+ self._glue = DrtGlue(
143
+ semtype_file=semtype_file,
144
+ remove_duplicates=remove_duplicates,
145
+ depparser=depparser,
146
+ )
147
+
148
+ def parse_to_readings(self, sentence):
149
+ """:see: ReadingCommand.parse_to_readings()"""
150
+ return self._glue.parse_to_meaning(sentence)
151
+
152
+ def process_thread(self, sentence_readings):
153
+ """:see: ReadingCommand.process_thread()"""
154
+ try:
155
+ return [self.combine_readings(sentence_readings)]
156
+ except AnaphoraResolutionException:
157
+ return []
158
+
159
+ def combine_readings(self, readings):
160
+ """:see: ReadingCommand.combine_readings()"""
161
+ thread_reading = reduce(add, readings)
162
+ return resolve_anaphora(thread_reading.simplify())
163
+
164
+ def to_fol(self, expression):
165
+ """:see: ReadingCommand.to_fol()"""
166
+ return expression.fol()
167
+
168
+
169
+ class DiscourseTester:
170
+ """
171
+ Check properties of an ongoing discourse.
172
+ """
173
+
174
+ def __init__(self, input, reading_command=None, background=None):
175
+ """
176
+ Initialize a ``DiscourseTester``.
177
+
178
+ :param input: the discourse sentences
179
+ :type input: list of str
180
+ :param background: Formulas which express background assumptions
181
+ :type background: list(Expression)
182
+ """
183
+ self._input = input
184
+ self._sentences = {"s%s" % i: sent for i, sent in enumerate(input)}
185
+ self._models = None
186
+ self._readings = {}
187
+ self._reading_command = (
188
+ reading_command if reading_command else CfgReadingCommand()
189
+ )
190
+ self._threads = {}
191
+ self._filtered_threads = {}
192
+ if background is not None:
193
+ from nltk.sem.logic import Expression
194
+
195
+ for e in background:
196
+ assert isinstance(e, Expression)
197
+ self._background = background
198
+ else:
199
+ self._background = []
200
+
201
+ ###############################
202
+ # Sentences
203
+ ###############################
204
+
205
+ def sentences(self):
206
+ """
207
+ Display the list of sentences in the current discourse.
208
+ """
209
+ for id in sorted(self._sentences):
210
+ print(f"{id}: {self._sentences[id]}")
211
+
212
+ def add_sentence(self, sentence, informchk=False, consistchk=False):
213
+ """
214
+ Add a sentence to the current discourse.
215
+
216
+ Updates ``self._input`` and ``self._sentences``.
217
+ :param sentence: An input sentence
218
+ :type sentence: str
219
+ :param informchk: if ``True``, check that the result of adding the sentence is thread-informative. Updates ``self._readings``.
220
+ :param consistchk: if ``True``, check that the result of adding the sentence is thread-consistent. Updates ``self._readings``.
221
+
222
+ """
223
+ # check whether the new sentence is informative (i.e. not entailed by the previous discourse)
224
+ if informchk:
225
+ self.readings(verbose=False)
226
+ for tid in sorted(self._threads):
227
+ assumptions = [reading for (rid, reading) in self.expand_threads(tid)]
228
+ assumptions += self._background
229
+ for sent_reading in self._get_readings(sentence):
230
+ tp = Prover9Command(goal=sent_reading, assumptions=assumptions)
231
+ if tp.prove():
232
+ print(
233
+ "Sentence '%s' under reading '%s':"
234
+ % (sentence, str(sent_reading))
235
+ )
236
+ print("Not informative relative to thread '%s'" % tid)
237
+
238
+ self._input.append(sentence)
239
+ self._sentences = {"s%s" % i: sent for i, sent in enumerate(self._input)}
240
+ # check whether adding the new sentence to the discourse preserves consistency (i.e. a model can be found for the combined set of
241
+ # assumptions)
242
+ if consistchk:
243
+ self.readings(verbose=False)
244
+ self.models(show=False)
245
+
246
+ def retract_sentence(self, sentence, verbose=True):
247
+ """
248
+ Remove a sentence from the current discourse.
249
+
250
+ Updates ``self._input``, ``self._sentences`` and ``self._readings``.
251
+ :param sentence: An input sentence
252
+ :type sentence: str
253
+ :param verbose: If ``True``, report on the updated list of sentences.
254
+ """
255
+ try:
256
+ self._input.remove(sentence)
257
+ except ValueError:
258
+ print(
259
+ "Retraction failed. The sentence '%s' is not part of the current discourse:"
260
+ % sentence
261
+ )
262
+ self.sentences()
263
+ return None
264
+ self._sentences = {"s%s" % i: sent for i, sent in enumerate(self._input)}
265
+ self.readings(verbose=False)
266
+ if verbose:
267
+ print("Current sentences are ")
268
+ self.sentences()
269
+
270
+ def grammar(self):
271
+ """
272
+ Print out the grammar in use for parsing input sentences
273
+ """
274
+ show_cfg(self._reading_command._gramfile)
275
+
276
+ ###############################
277
+ # Readings and Threads
278
+ ###############################
279
+
280
+ def _get_readings(self, sentence):
281
+ """
282
+ Build a list of semantic readings for a sentence.
283
+
284
+ :rtype: list(Expression)
285
+ """
286
+ return self._reading_command.parse_to_readings(sentence)
287
+
288
+ def _construct_readings(self):
289
+ """
290
+ Use ``self._sentences`` to construct a value for ``self._readings``.
291
+ """
292
+ # re-initialize self._readings in case we have retracted a sentence
293
+ self._readings = {}
294
+ for sid in sorted(self._sentences):
295
+ sentence = self._sentences[sid]
296
+ readings = self._get_readings(sentence)
297
+ self._readings[sid] = {
298
+ f"{sid}-r{rid}": reading.simplify()
299
+ for rid, reading in enumerate(sorted(readings, key=str))
300
+ }
301
+
302
+ def _construct_threads(self):
303
+ """
304
+ Use ``self._readings`` to construct a value for ``self._threads``
305
+ and use the model builder to construct a value for ``self._filtered_threads``
306
+ """
307
+ thread_list = [[]]
308
+ for sid in sorted(self._readings):
309
+ thread_list = self.multiply(thread_list, sorted(self._readings[sid]))
310
+ self._threads = {"d%s" % tid: thread for tid, thread in enumerate(thread_list)}
311
+ # re-initialize the filtered threads
312
+ self._filtered_threads = {}
313
+ # keep the same ids, but only include threads which get models
314
+ consistency_checked = self._check_consistency(self._threads)
315
+ for (tid, thread) in self._threads.items():
316
+ if (tid, True) in consistency_checked:
317
+ self._filtered_threads[tid] = thread
318
+
319
+ def _show_readings(self, sentence=None):
320
+ """
321
+ Print out the readings for the discourse (or a single sentence).
322
+ """
323
+ if sentence is not None:
324
+ print("The sentence '%s' has these readings:" % sentence)
325
+ for r in [str(reading) for reading in (self._get_readings(sentence))]:
326
+ print(" %s" % r)
327
+ else:
328
+ for sid in sorted(self._readings):
329
+ print()
330
+ print("%s readings:" % sid)
331
+ print() #'-' * 30
332
+ for rid in sorted(self._readings[sid]):
333
+ lf = self._readings[sid][rid]
334
+ print(f"{rid}: {lf.normalize()}")
335
+
336
+ def _show_threads(self, filter=False, show_thread_readings=False):
337
+ """
338
+ Print out the value of ``self._threads`` or ``self._filtered_threads``
339
+ """
340
+ threads = self._filtered_threads if filter else self._threads
341
+ for tid in sorted(threads):
342
+ if show_thread_readings:
343
+ readings = [
344
+ self._readings[rid.split("-")[0]][rid] for rid in self._threads[tid]
345
+ ]
346
+ try:
347
+ thread_reading = (
348
+ ": %s"
349
+ % self._reading_command.combine_readings(readings).normalize()
350
+ )
351
+ except Exception as e:
352
+ thread_reading = ": INVALID: %s" % e.__class__.__name__
353
+ else:
354
+ thread_reading = ""
355
+
356
+ print("%s:" % tid, self._threads[tid], thread_reading)
357
+
358
+ def readings(
359
+ self,
360
+ sentence=None,
361
+ threaded=False,
362
+ verbose=True,
363
+ filter=False,
364
+ show_thread_readings=False,
365
+ ):
366
+ """
367
+ Construct and show the readings of the discourse (or of a single sentence).
368
+
369
+ :param sentence: test just this sentence
370
+ :type sentence: str
371
+ :param threaded: if ``True``, print out each thread ID and the corresponding thread.
372
+ :param filter: if ``True``, only print out consistent thread IDs and threads.
373
+ """
374
+ self._construct_readings()
375
+ self._construct_threads()
376
+
377
+ # if we are filtering or showing thread readings, show threads
378
+ if filter or show_thread_readings:
379
+ threaded = True
380
+
381
+ if verbose:
382
+ if not threaded:
383
+ self._show_readings(sentence=sentence)
384
+ else:
385
+ self._show_threads(
386
+ filter=filter, show_thread_readings=show_thread_readings
387
+ )
388
+
389
+ def expand_threads(self, thread_id, threads=None):
390
+ """
391
+ Given a thread ID, find the list of ``logic.Expression`` objects corresponding to the reading IDs in that thread.
392
+
393
+ :param thread_id: thread ID
394
+ :type thread_id: str
395
+ :param threads: a mapping from thread IDs to lists of reading IDs
396
+ :type threads: dict
397
+ :return: A list of pairs ``(rid, reading)`` where reading is the ``logic.Expression`` associated with a reading ID
398
+ :rtype: list of tuple
399
+ """
400
+ if threads is None:
401
+ threads = self._threads
402
+ return [
403
+ (rid, self._readings[sid][rid])
404
+ for rid in threads[thread_id]
405
+ for sid in rid.split("-")[:1]
406
+ ]
407
+
408
+ ###############################
409
+ # Models and Background
410
+ ###############################
411
+
412
+ def _check_consistency(self, threads, show=False, verbose=False):
413
+ results = []
414
+ for tid in sorted(threads):
415
+ assumptions = [
416
+ reading for (rid, reading) in self.expand_threads(tid, threads=threads)
417
+ ]
418
+ assumptions = list(
419
+ map(
420
+ self._reading_command.to_fol,
421
+ self._reading_command.process_thread(assumptions),
422
+ )
423
+ )
424
+ if assumptions:
425
+ assumptions += self._background
426
+ # if Mace4 finds a model, it always seems to find it quickly
427
+ mb = MaceCommand(None, assumptions, max_models=20)
428
+ modelfound = mb.build_model()
429
+ else:
430
+ modelfound = False
431
+ results.append((tid, modelfound))
432
+ if show:
433
+ spacer(80)
434
+ print("Model for Discourse Thread %s" % tid)
435
+ spacer(80)
436
+ if verbose:
437
+ for a in assumptions:
438
+ print(a)
439
+ spacer(80)
440
+ if modelfound:
441
+ print(mb.model(format="cooked"))
442
+ else:
443
+ print("No model found!\n")
444
+ return results
445
+
446
+ def models(self, thread_id=None, show=True, verbose=False):
447
+ """
448
+ Call Mace4 to build a model for each current discourse thread.
449
+
450
+ :param thread_id: thread ID
451
+ :type thread_id: str
452
+ :param show: If ``True``, display the model that has been found.
453
+ """
454
+ self._construct_readings()
455
+ self._construct_threads()
456
+ threads = {thread_id: self._threads[thread_id]} if thread_id else self._threads
457
+
458
+ for (tid, modelfound) in self._check_consistency(
459
+ threads, show=show, verbose=verbose
460
+ ):
461
+ idlist = [rid for rid in threads[tid]]
462
+
463
+ if not modelfound:
464
+ print(f"Inconsistent discourse: {tid} {idlist}:")
465
+ for rid, reading in self.expand_threads(tid):
466
+ print(f" {rid}: {reading.normalize()}")
467
+ print()
468
+ else:
469
+ print(f"Consistent discourse: {tid} {idlist}:")
470
+ for rid, reading in self.expand_threads(tid):
471
+ print(f" {rid}: {reading.normalize()}")
472
+ print()
473
+
474
+ def add_background(self, background, verbose=False):
475
+ """
476
+ Add a list of background assumptions for reasoning about the discourse.
477
+
478
+ When called, this method also updates the discourse model's set of readings and threads.
479
+ :param background: Formulas which contain background information
480
+ :type background: list(Expression)
481
+ """
482
+ from nltk.sem.logic import Expression
483
+
484
+ for (count, e) in enumerate(background):
485
+ assert isinstance(e, Expression)
486
+ if verbose:
487
+ print("Adding assumption %s to background" % count)
488
+ self._background.append(e)
489
+
490
+ # update the state
491
+ self._construct_readings()
492
+ self._construct_threads()
493
+
494
+ def background(self):
495
+ """
496
+ Show the current background assumptions.
497
+ """
498
+ for e in self._background:
499
+ print(str(e))
500
+
501
+ ###############################
502
+ # Misc
503
+ ###############################
504
+
505
+ @staticmethod
506
+ def multiply(discourse, readings):
507
+ """
508
+ Multiply every thread in ``discourse`` by every reading in ``readings``.
509
+
510
+ Given discourse = [['A'], ['B']], readings = ['a', 'b', 'c'] , returns
511
+ [['A', 'a'], ['A', 'b'], ['A', 'c'], ['B', 'a'], ['B', 'b'], ['B', 'c']]
512
+
513
+ :param discourse: the current list of readings
514
+ :type discourse: list of lists
515
+ :param readings: an additional list of readings
516
+ :type readings: list(Expression)
517
+ :rtype: A list of lists
518
+ """
519
+ result = []
520
+ for sublist in discourse:
521
+ for r in readings:
522
+ new = []
523
+ new += sublist
524
+ new.append(r)
525
+ result.append(new)
526
+ return result
527
+
528
+
529
+ def load_fol(s):
530
+ """
531
+ Temporarily duplicated from ``nltk.sem.util``.
532
+ Convert a file of first order formulas into a list of ``Expression`` objects.
533
+
534
+ :param s: the contents of the file
535
+ :type s: str
536
+ :return: a list of parsed formulas.
537
+ :rtype: list(Expression)
538
+ """
539
+ statements = []
540
+ for linenum, line in enumerate(s.splitlines()):
541
+ line = line.strip()
542
+ if line.startswith("#") or line == "":
543
+ continue
544
+ try:
545
+ statements.append(Expression.fromstring(line))
546
+ except Exception as e:
547
+ raise ValueError(f"Unable to parse line {linenum}: {line}") from e
548
+ return statements
549
+
550
+
551
+ ###############################
552
+ # Demo
553
+ ###############################
554
+ def discourse_demo(reading_command=None):
555
+ """
556
+ Illustrate the various methods of ``DiscourseTester``
557
+ """
558
+ dt = DiscourseTester(
559
+ ["A boxer walks", "Every boxer chases a girl"], reading_command
560
+ )
561
+ dt.models()
562
+ print()
563
+ # dt.grammar()
564
+ print()
565
+ dt.sentences()
566
+ print()
567
+ dt.readings()
568
+ print()
569
+ dt.readings(threaded=True)
570
+ print()
571
+ dt.models("d1")
572
+ dt.add_sentence("John is a boxer")
573
+ print()
574
+ dt.sentences()
575
+ print()
576
+ dt.readings(threaded=True)
577
+ print()
578
+ dt = DiscourseTester(
579
+ ["A student dances", "Every student is a person"], reading_command
580
+ )
581
+ print()
582
+ dt.add_sentence("No person dances", consistchk=True)
583
+ print()
584
+ dt.readings()
585
+ print()
586
+ dt.retract_sentence("No person dances", verbose=True)
587
+ print()
588
+ dt.models()
589
+ print()
590
+ dt.readings("A person dances")
591
+ print()
592
+ dt.add_sentence("A person dances", informchk=True)
593
+ dt = DiscourseTester(
594
+ ["Vincent is a boxer", "Fido is a boxer", "Vincent is married", "Fido barks"],
595
+ reading_command,
596
+ )
597
+ dt.readings(filter=True)
598
+ import nltk.data
599
+
600
+ background_file = os.path.join("grammars", "book_grammars", "background.fol")
601
+ background = nltk.data.load(background_file)
602
+
603
+ print()
604
+ dt.add_background(background, verbose=False)
605
+ dt.background()
606
+ print()
607
+ dt.readings(filter=True)
608
+ print()
609
+ dt.models()
610
+
611
+
612
+ def drt_discourse_demo(reading_command=None):
613
+ """
614
+ Illustrate the various methods of ``DiscourseTester``
615
+ """
616
+ dt = DiscourseTester(["every dog chases a boy", "he runs"], reading_command)
617
+ dt.models()
618
+ print()
619
+ dt.sentences()
620
+ print()
621
+ dt.readings()
622
+ print()
623
+ dt.readings(show_thread_readings=True)
624
+ print()
625
+ dt.readings(filter=True, show_thread_readings=True)
626
+
627
+
628
+ def spacer(num=30):
629
+ print("-" * num)
630
+
631
+
632
+ def demo():
633
+ discourse_demo()
634
+
635
+ tagger = RegexpTagger(
636
+ [
637
+ ("^(chases|runs)$", "VB"),
638
+ ("^(a)$", "ex_quant"),
639
+ ("^(every)$", "univ_quant"),
640
+ ("^(dog|boy)$", "NN"),
641
+ ("^(he)$", "PRP"),
642
+ ]
643
+ )
644
+ depparser = MaltParser(tagger=tagger)
645
+ drt_discourse_demo(
646
+ DrtGlueReadingCommand(remove_duplicates=False, depparser=depparser)
647
+ )
648
+
649
+
650
+ if __name__ == "__main__":
651
+ demo()
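As the module docstring above notes, a thread holds one reading per sentence, and the set of threads is the Cartesian product of the per-sentence readings. The static ``multiply`` helper that builds this product is pure Python, so it can be exercised without the Prover9/Mace4 binaries that the consistency and informativeness checks rely on; a small sketch (the reading IDs are illustrative):

    from nltk.inference.discourse import DiscourseTester

    # Two candidate readings for sentence s0, each already a one-element thread
    threads = [["s0-r0"], ["s0-r1"]]

    # Extend every existing thread with every reading of sentence s1
    threads = DiscourseTester.multiply(threads, ["s1-r0", "s1-r1"])
    print(threads)
    # [['s0-r0', 's1-r0'], ['s0-r0', 's1-r1'], ['s0-r1', 's1-r0'], ['s0-r1', 's1-r1']]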
llmeval-env/lib/python3.10/site-packages/nltk/inference/mace.py ADDED
@@ -0,0 +1,383 @@
1
+ # Natural Language Toolkit: Interface to the Mace4 Model Builder
2
+ #
3
+ # Author: Dan Garrette <[email protected]>
4
+ # Ewan Klein <[email protected]>
5
+
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ A model builder that makes use of the external 'Mace4' package.
11
+ """
12
+
13
+ import os
14
+ import tempfile
15
+
16
+ from nltk.inference.api import BaseModelBuilderCommand, ModelBuilder
17
+ from nltk.inference.prover9 import Prover9CommandParent, Prover9Parent
18
+ from nltk.sem import Expression, Valuation
19
+ from nltk.sem.logic import is_indvar
20
+
21
+
22
+ class MaceCommand(Prover9CommandParent, BaseModelBuilderCommand):
23
+ """
24
+ A ``MaceCommand`` specific to the ``Mace`` model builder. It contains
25
+ a print_assumptions() method that is used to print the list
26
+ of assumptions in multiple formats.
27
+ """
28
+
29
+ _interpformat_bin = None
30
+
31
+ def __init__(self, goal=None, assumptions=None, max_models=500, model_builder=None):
32
+ """
33
+ :param goal: Input expression to prove
34
+ :type goal: sem.Expression
35
+ :param assumptions: Input expressions to use as assumptions in
36
+ the proof.
37
+ :type assumptions: list(sem.Expression)
38
+ :param max_models: The maximum number of models that Mace will try before
39
+ simply returning false. (Use 0 for no maximum.)
40
+ :type max_models: int
41
+ """
42
+ if model_builder is not None:
43
+ assert isinstance(model_builder, Mace)
44
+ else:
45
+ model_builder = Mace(max_models)
46
+
47
+ BaseModelBuilderCommand.__init__(self, model_builder, goal, assumptions)
48
+
49
+ @property
50
+ def valuation(mbc):
51
+ return mbc.model("valuation")
52
+
53
+ def _convert2val(self, valuation_str):
54
+ """
55
+ Transform the output file into an NLTK-style Valuation.
56
+
57
+ :return: A model if one is generated; None otherwise.
58
+ :rtype: sem.Valuation
59
+ """
60
+ valuation_standard_format = self._transform_output(valuation_str, "standard")
61
+
62
+ val = []
63
+ for line in valuation_standard_format.splitlines(False):
64
+ l = line.strip()
65
+
66
+ if l.startswith("interpretation"):
67
+ # find the number of entities in the model
68
+ num_entities = int(l[l.index("(") + 1 : l.index(",")].strip())
69
+
70
+ elif l.startswith("function") and l.find("_") == -1:
71
+ # replace the integer identifier with a corresponding alphabetic character
72
+ name = l[l.index("(") + 1 : l.index(",")].strip()
73
+ if is_indvar(name):
74
+ name = name.upper()
75
+ value = int(l[l.index("[") + 1 : l.index("]")].strip())
76
+ val.append((name, MaceCommand._make_model_var(value)))
77
+
78
+ elif l.startswith("relation"):
79
+ l = l[l.index("(") + 1 :]
80
+ if "(" in l:
81
+ # relation is not nullary
82
+ name = l[: l.index("(")].strip()
83
+ values = [
84
+ int(v.strip())
85
+ for v in l[l.index("[") + 1 : l.index("]")].split(",")
86
+ ]
87
+ val.append(
88
+ (name, MaceCommand._make_relation_set(num_entities, values))
89
+ )
90
+ else:
91
+ # relation is nullary
92
+ name = l[: l.index(",")].strip()
93
+ value = int(l[l.index("[") + 1 : l.index("]")].strip())
94
+ val.append((name, value == 1))
95
+
96
+ return Valuation(val)
97
+
98
+ @staticmethod
99
+ def _make_relation_set(num_entities, values):
100
+ """
101
+ Convert a Mace4-style relation table into a dictionary.
102
+
103
+ :param num_entities: the number of entities in the model; determines the row length in the table.
104
+ :type num_entities: int
105
+ :param values: a list of 1's and 0's that represent whether a relation holds in a Mace4 model.
106
+ :type values: list of int
107
+ """
108
+ r = set()
109
+ for position in [pos for (pos, v) in enumerate(values) if v == 1]:
110
+ r.add(
111
+ tuple(MaceCommand._make_relation_tuple(position, values, num_entities))
112
+ )
113
+ return r
114
+
115
+ @staticmethod
116
+ def _make_relation_tuple(position, values, num_entities):
117
+ if len(values) == 1:
118
+ return []
119
+ else:
120
+ sublist_size = len(values) // num_entities
121
+ sublist_start = position // sublist_size
122
+ sublist_position = int(position % sublist_size)
123
+
124
+ sublist = values[
125
+ sublist_start * sublist_size : (sublist_start + 1) * sublist_size
126
+ ]
127
+ return [
128
+ MaceCommand._make_model_var(sublist_start)
129
+ ] + MaceCommand._make_relation_tuple(
130
+ sublist_position, sublist, num_entities
131
+ )
132
+
133
+ @staticmethod
134
+ def _make_model_var(value):
135
+ """
136
+ Pick an alphabetic character as identifier for an entity in the model.
137
+
138
+ :param value: where to index into the list of characters
139
+ :type value: int
140
+ """
141
+ letter = [
142
+ "a",
143
+ "b",
144
+ "c",
145
+ "d",
146
+ "e",
147
+ "f",
148
+ "g",
149
+ "h",
150
+ "i",
151
+ "j",
152
+ "k",
153
+ "l",
154
+ "m",
155
+ "n",
156
+ "o",
157
+ "p",
158
+ "q",
159
+ "r",
160
+ "s",
161
+ "t",
162
+ "u",
163
+ "v",
164
+ "w",
165
+ "x",
166
+ "y",
167
+ "z",
168
+ ][value]
169
+ num = value // 26
170
+ return letter + str(num) if num > 0 else letter
171
+
172
+ def _decorate_model(self, valuation_str, format):
173
+ """
174
+ Print out a Mace4 model using any Mace4 ``interpformat`` format.
175
+ See https://www.cs.unm.edu/~mccune/mace4/manual/ for details.
176
+
177
+ :param valuation_str: str with the model builder's output
178
+ :param format: str indicating the format for displaying
179
+ models. Defaults to 'standard' format.
180
+ :return: str
181
+ """
182
+ if not format:
183
+ return valuation_str
184
+ elif format == "valuation":
185
+ return self._convert2val(valuation_str)
186
+ else:
187
+ return self._transform_output(valuation_str, format)
188
+
189
+ def _transform_output(self, valuation_str, format):
190
+ """
191
+ Transform the output file into any Mace4 ``interpformat`` format.
192
+
193
+ :param format: Output format for displaying models.
194
+ :type format: str
195
+ """
196
+ if format in [
197
+ "standard",
198
+ "standard2",
199
+ "portable",
200
+ "tabular",
201
+ "raw",
202
+ "cooked",
203
+ "xml",
204
+ "tex",
205
+ ]:
206
+ return self._call_interpformat(valuation_str, [format])[0]
207
+ else:
208
+ raise LookupError("The specified format does not exist")
209
+
210
+ def _call_interpformat(self, input_str, args=[], verbose=False):
211
+ """
212
+ Call the ``interpformat`` binary with the given input.
213
+
214
+ :param input_str: A string whose contents are used as stdin.
215
+ :param args: A list of command-line arguments.
216
+ :return: A tuple (stdout, returncode)
217
+ :see: ``config_prover9``
218
+ """
219
+ if self._interpformat_bin is None:
220
+ self._interpformat_bin = self._modelbuilder._find_binary(
221
+ "interpformat", verbose
222
+ )
223
+
224
+ return self._modelbuilder._call(
225
+ input_str, self._interpformat_bin, args, verbose
226
+ )
227
+
228
+
229
+ class Mace(Prover9Parent, ModelBuilder):
230
+ _mace4_bin = None
231
+
232
+ def __init__(self, end_size=500):
233
+ self._end_size = end_size
234
+ """The maximum model size that Mace will try before
235
+ simply returning false. (Use -1 for no maximum.)"""
236
+
237
+ def _build_model(self, goal=None, assumptions=None, verbose=False):
238
+ """
239
+ Use Mace4 to build a first order model.
240
+
241
+ :return: ``True`` if a model was found (i.e. Mace returns value of 0),
242
+ else ``False``
243
+ """
244
+ if not assumptions:
245
+ assumptions = []
246
+
247
+ stdout, returncode = self._call_mace4(
248
+ self.prover9_input(goal, assumptions), verbose=verbose
249
+ )
250
+ return (returncode == 0, stdout)
251
+
252
+ def _call_mace4(self, input_str, args=[], verbose=False):
253
+ """
254
+ Call the ``mace4`` binary with the given input.
255
+
256
+ :param input_str: A string whose contents are used as stdin.
257
+ :param args: A list of command-line arguments.
258
+ :return: A tuple (stdout, returncode)
259
+ :see: ``config_prover9``
260
+ """
261
+ if self._mace4_bin is None:
262
+ self._mace4_bin = self._find_binary("mace4", verbose)
263
+
264
+ updated_input_str = ""
265
+ if self._end_size > 0:
266
+ updated_input_str += "assign(end_size, %d).\n\n" % self._end_size
267
+ updated_input_str += input_str
268
+
269
+ return self._call(updated_input_str, self._mace4_bin, args, verbose)
270
+
271
+
272
+ def spacer(num=30):
273
+ print("-" * num)
274
+
275
+
276
+ def decode_result(found):
277
+ """
278
+ Decode the result of model_found()
279
+
280
+ :param found: The output of model_found()
281
+ :type found: bool
282
+ """
283
+ return {True: "Countermodel found", False: "No countermodel found", None: "None"}[
284
+ found
285
+ ]
286
+
287
+
288
+ def test_model_found(arguments):
289
+ """
290
+ Try some proofs and exhibit the results.
291
+ """
292
+ for (goal, assumptions) in arguments:
293
+ g = Expression.fromstring(goal)
294
+ alist = [Expression.fromstring(a) for a in assumptions]
295
+ m = MaceCommand(g, assumptions=alist, max_models=50)
296
+ found = m.build_model()
297
+ for a in alist:
298
+ print(" %s" % a)
299
+ print(f"|- {g}: {decode_result(found)}\n")
300
+
301
+
302
+ def test_build_model(arguments):
303
+ """
304
+ Try to build a ``nltk.sem.Valuation``.
305
+ """
306
+ g = Expression.fromstring("all x.man(x)")
307
+ alist = [
308
+ Expression.fromstring(a)
309
+ for a in [
310
+ "man(John)",
311
+ "man(Socrates)",
312
+ "man(Bill)",
313
+ "some x.(-(x = John) & man(x) & sees(John,x))",
314
+ "some x.(-(x = Bill) & man(x))",
315
+ "all x.some y.(man(x) -> gives(Socrates,x,y))",
316
+ ]
317
+ ]
318
+
319
+ m = MaceCommand(g, assumptions=alist)
320
+ m.build_model()
321
+ spacer()
322
+ print("Assumptions and Goal")
323
+ spacer()
324
+ for a in alist:
325
+ print(" %s" % a)
326
+ print(f"|- {g}: {decode_result(m.build_model())}\n")
327
+ spacer()
328
+ # print(m.model('standard'))
329
+ # print(m.model('cooked'))
330
+ print("Valuation")
331
+ spacer()
332
+ print(m.valuation, "\n")
333
+
334
+
335
+ def test_transform_output(argument_pair):
336
+ """
337
+ Transform the model into various Mace4 ``interpformat`` formats.
338
+ """
339
+ g = Expression.fromstring(argument_pair[0])
340
+ alist = [Expression.fromstring(a) for a in argument_pair[1]]
341
+ m = MaceCommand(g, assumptions=alist)
342
+ m.build_model()
343
+ for a in alist:
344
+ print(" %s" % a)
345
+ print(f"|- {g}: {m.build_model()}\n")
346
+ for format in ["standard", "portable", "xml", "cooked"]:
347
+ spacer()
348
+ print("Using '%s' format" % format)
349
+ spacer()
350
+ print(m.model(format=format))
351
+
352
+
353
+ def test_make_relation_set():
354
+ print(
355
+ MaceCommand._make_relation_set(num_entities=3, values=[1, 0, 1])
356
+ == {("c",), ("a",)}
357
+ )
358
+ print(
359
+ MaceCommand._make_relation_set(
360
+ num_entities=3, values=[0, 0, 0, 0, 0, 0, 1, 0, 0]
361
+ )
362
+ == {("c", "a")}
363
+ )
364
+ print(
365
+ MaceCommand._make_relation_set(num_entities=2, values=[0, 0, 1, 0, 0, 0, 1, 0])
366
+ == {("a", "b", "a"), ("b", "b", "a")}
367
+ )
368
+
369
+
370
+ arguments = [
371
+ ("mortal(Socrates)", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]),
372
+ ("(not mortal(Socrates))", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]),
373
+ ]
374
+
375
+
376
+ def demo():
377
+ test_model_found(arguments)
378
+ test_build_model(arguments)
379
+ test_transform_output(arguments[1])
380
+
381
+
382
+ if __name__ == "__main__":
383
+ demo()
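# Editorial sketch, not part of the NLTK source: the MaceCommand static helpers
# can be exercised without the external mace4/interpformat binaries that the
# demos above require (those are located via the search paths or the PROVER9
# environment variable).  For a binary relation over three entities, Mace4
# emits a flat row-major table of nine 0/1 flags; a 1 at flat index 6 lies in
# row 2 ('c'), column 0 ('a'), so it decodes to the tuple ('c', 'a').  The
# helper name below is illustrative only.
def _sketch_relation_decoding():
    values = [0, 0, 0, 0, 0, 0, 1, 0, 0]
    assert MaceCommand._make_relation_set(num_entities=3, values=values) == {("c", "a")}
    assert MaceCommand._make_model_var(0) == "a"  # entities are named 'a', 'b', 'c', ...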
llmeval-env/lib/python3.10/site-packages/nltk/inference/nonmonotonic.py ADDED
@@ -0,0 +1,561 @@
1
+ # Natural Language Toolkit: Nonmonotonic Reasoning
2
+ #
3
+ # Author: Daniel H. Garrette <[email protected]>
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ A module to perform nonmonotonic reasoning. The ideas and demonstrations in
11
+ this module are based on "Logical Foundations of Artificial Intelligence" by
12
+ Michael R. Genesereth and Nils J. Nilsson.
13
+ """
14
+
15
+ from collections import defaultdict
16
+ from functools import reduce
17
+
18
+ from nltk.inference.api import Prover, ProverCommandDecorator
19
+ from nltk.inference.prover9 import Prover9, Prover9Command
20
+ from nltk.sem.logic import (
21
+ AbstractVariableExpression,
22
+ AllExpression,
23
+ AndExpression,
24
+ ApplicationExpression,
25
+ BooleanExpression,
26
+ EqualityExpression,
27
+ ExistsExpression,
28
+ Expression,
29
+ ImpExpression,
30
+ NegatedExpression,
31
+ Variable,
32
+ VariableExpression,
33
+ operator,
34
+ unique_variable,
35
+ )
36
+
37
+
38
+ class ProverParseError(Exception):
39
+ pass
40
+
41
+
42
+ def get_domain(goal, assumptions):
43
+ if goal is None:
44
+ all_expressions = assumptions
45
+ else:
46
+ all_expressions = assumptions + [-goal]
47
+ return reduce(operator.or_, (a.constants() for a in all_expressions), set())
48
+
49
+
50
+ class ClosedDomainProver(ProverCommandDecorator):
51
+ """
52
+ This is a prover decorator that adds domain closure assumptions before
53
+ proving.
54
+ """
55
+
56
+ def assumptions(self):
57
+ assumptions = [a for a in self._command.assumptions()]
58
+ goal = self._command.goal()
59
+ domain = get_domain(goal, assumptions)
60
+ return [self.replace_quants(ex, domain) for ex in assumptions]
61
+
62
+ def goal(self):
63
+ goal = self._command.goal()
64
+ domain = get_domain(goal, self._command.assumptions())
65
+ return self.replace_quants(goal, domain)
66
+
67
+ def replace_quants(self, ex, domain):
68
+ """
69
+ Apply the closed domain assumption to the expression
70
+
71
+ - Domain = union([e.free()|e.constants() for e in all_expressions])
72
+ - translate "exists x.P" to "(z=d1 | z=d2 | ... ) & P.replace(x,z)" OR
73
+ "P.replace(x, d1) | P.replace(x, d2) | ..."
74
+ - translate "all x.P" to "P.replace(x, d1) & P.replace(x, d2) & ..."
75
+
76
+ :param ex: ``Expression``
77
+ :param domain: set of {Variable}s
78
+ :return: ``Expression``
79
+ """
80
+ if isinstance(ex, AllExpression):
81
+ conjuncts = [
82
+ ex.term.replace(ex.variable, VariableExpression(d)) for d in domain
83
+ ]
84
+ conjuncts = [self.replace_quants(c, domain) for c in conjuncts]
85
+ return reduce(lambda x, y: x & y, conjuncts)
86
+ elif isinstance(ex, BooleanExpression):
87
+ return ex.__class__(
88
+ self.replace_quants(ex.first, domain),
89
+ self.replace_quants(ex.second, domain),
90
+ )
91
+ elif isinstance(ex, NegatedExpression):
92
+ return -self.replace_quants(ex.term, domain)
93
+ elif isinstance(ex, ExistsExpression):
94
+ disjuncts = [
95
+ ex.term.replace(ex.variable, VariableExpression(d)) for d in domain
96
+ ]
97
+ disjuncts = [self.replace_quants(d, domain) for d in disjuncts]
98
+ return reduce(lambda x, y: x | y, disjuncts)
99
+ else:
100
+ return ex
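# Editorial sketch, not part of the NLTK source: the decorator above only
# rewrites the goal and assumptions, so the domain-closure expansion can be
# inspected without the external prover9 binary (which is needed only for the
# actual proof call).  The helper name is illustrative only.
def _sketch_domain_closure():
    p1 = Expression.fromstring("exists x.walk(x)")
    p2 = Expression.fromstring("-walk(Bill)")
    goal = Expression.fromstring("walk(Socrates)")
    cdp = ClosedDomainProver(Prover9Command(goal, [p1, p2]))
    for a in cdp.assumptions():
        print(a)  # e.g. (walk(Bill) | walk(Socrates)) and -walk(Bill)
    print(cdp.goal())  # walk(Socrates)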
101
+
102
+
103
+ class UniqueNamesProver(ProverCommandDecorator):
104
+ """
105
+ This is a prover decorator that adds unique names assumptions before
106
+ proving.
107
+ """
108
+
109
+ def assumptions(self):
110
+ """
111
+ - Domain = union([e.free()|e.constants() for e in all_expressions])
112
+ - if "d1 = d2" cannot be proven from the premises, then add "d1 != d2"
113
+ """
114
+ assumptions = self._command.assumptions()
115
+
116
+ domain = list(get_domain(self._command.goal(), assumptions))
117
+
118
+ # build a dictionary of obvious equalities
119
+ eq_sets = SetHolder()
120
+ for a in assumptions:
121
+ if isinstance(a, EqualityExpression):
122
+ av = a.first.variable
123
+ bv = a.second.variable
124
+ # put 'a' and 'b' in the same set
125
+ eq_sets[av].add(bv)
126
+
127
+ new_assumptions = []
128
+ for i, a in enumerate(domain):
129
+ for b in domain[i + 1 :]:
130
+ # if a and b are not already in the same equality set
131
+ if b not in eq_sets[a]:
132
+ newEqEx = EqualityExpression(
133
+ VariableExpression(a), VariableExpression(b)
134
+ )
135
+ if Prover9().prove(newEqEx, assumptions):
136
+ # we can prove that the names are the same entity.
137
+ # remember that they are equal so we don't re-check.
138
+ eq_sets[a].add(b)
139
+ else:
140
+ # we can't prove it, so assume unique names
141
+ new_assumptions.append(-newEqEx)
142
+
143
+ return assumptions + new_assumptions
144
+
145
+
146
+ class SetHolder(list):
147
+ """
148
+ A list of sets of Variables.
149
+ """
150
+
151
+ def __getitem__(self, item):
152
+ """
153
+ :param item: ``Variable``
154
+ :return: the set containing 'item'
155
+ """
156
+ assert isinstance(item, Variable)
157
+ for s in self:
158
+ if item in s:
159
+ return s
160
+ # item is not found in any existing set. so create a new set
161
+ new = {item}
162
+ self.append(new)
163
+ return new
164
+
165
+
166
+ class ClosedWorldProver(ProverCommandDecorator):
167
+ """
168
+ This is a prover decorator that completes predicates before proving.
169
+
170
+ If the assumptions contain "P(A)", then "all x.(P(x) -> (x=A))" is the completion of "P".
171
+ If the assumptions contain "all x.(ostrich(x) -> bird(x))", then "all x.(bird(x) -> ostrich(x))" is the completion of "bird".
172
+ If the assumptions don't contain anything that is "P", then "all x.-P(x)" is the completion of "P".
173
+
174
+ walk(Socrates)
175
+ Socrates != Bill
176
+ + all x.(walk(x) -> (x=Socrates))
177
+ ----------------
178
+ -walk(Bill)
179
+
180
+ see(Socrates, John)
181
+ see(John, Mary)
182
+ Socrates != John
183
+ John != Mary
184
+ + all x.all y.(see(x,y) -> ((x=Socrates & y=John) | (x=John & y=Mary)))
185
+ ----------------
186
+ -see(Socrates, Mary)
187
+
188
+ all x.(ostrich(x) -> bird(x))
189
+ bird(Tweety)
190
+ -ostrich(Sam)
191
+ Sam != Tweety
192
+ + all x.(bird(x) -> (ostrich(x) | x=Tweety))
193
+ + all x.-ostrich(x)
194
+ -------------------
195
+ -bird(Sam)
196
+ """
197
+
198
+ def assumptions(self):
199
+ assumptions = self._command.assumptions()
200
+
201
+ predicates = self._make_predicate_dict(assumptions)
202
+
203
+ new_assumptions = []
204
+ for p in predicates:
205
+ predHolder = predicates[p]
206
+ new_sig = self._make_unique_signature(predHolder)
207
+ new_sig_exs = [VariableExpression(v) for v in new_sig]
208
+
209
+ disjuncts = []
210
+
211
+ # Turn the signatures into disjuncts
212
+ for sig in predHolder.signatures:
213
+ equality_exs = []
214
+ for v1, v2 in zip(new_sig_exs, sig):
215
+ equality_exs.append(EqualityExpression(v1, v2))
216
+ disjuncts.append(reduce(lambda x, y: x & y, equality_exs))
217
+
218
+ # Turn the properties into disjuncts
219
+ for prop in predHolder.properties:
220
+ # replace variables from the signature with new sig variables
221
+ bindings = {}
222
+ for v1, v2 in zip(new_sig_exs, prop[0]):
223
+ bindings[v2] = v1
224
+ disjuncts.append(prop[1].substitute_bindings(bindings))
225
+
226
+ # make the assumption
227
+ if disjuncts:
228
+ # disjuncts exist, so make an implication
229
+ antecedent = self._make_antecedent(p, new_sig)
230
+ consequent = reduce(lambda x, y: x | y, disjuncts)
231
+ accum = ImpExpression(antecedent, consequent)
232
+ else:
233
+ # nothing has property 'p'
234
+ accum = NegatedExpression(self._make_antecedent(p, new_sig))
235
+
236
+ # quantify the implication
237
+ for new_sig_var in new_sig[::-1]:
238
+ accum = AllExpression(new_sig_var, accum)
239
+ new_assumptions.append(accum)
240
+
241
+ return assumptions + new_assumptions
242
+
243
+ def _make_unique_signature(self, predHolder):
244
+ """
245
+ This method figures out how many arguments the predicate takes and
246
+ returns a tuple containing that number of unique variables.
247
+ """
248
+ return tuple(unique_variable() for i in range(predHolder.signature_len))
249
+
250
+ def _make_antecedent(self, predicate, signature):
251
+ """
252
+ Return an application expression with 'predicate' as the predicate
253
+ and 'signature' as the list of arguments.
254
+ """
255
+ antecedent = predicate
256
+ for v in signature:
257
+ antecedent = antecedent(VariableExpression(v))
258
+ return antecedent
259
+
260
+ def _make_predicate_dict(self, assumptions):
261
+ """
262
+ Create a dictionary of predicates from the assumptions.
263
+
264
+ :param assumptions: a list of ``Expression``s
265
+ :return: dict mapping ``AbstractVariableExpression`` to ``PredHolder``
266
+ """
267
+ predicates = defaultdict(PredHolder)
268
+ for a in assumptions:
269
+ self._map_predicates(a, predicates)
270
+ return predicates
271
+
272
+ def _map_predicates(self, expression, predDict):
273
+ if isinstance(expression, ApplicationExpression):
274
+ func, args = expression.uncurry()
275
+ if isinstance(func, AbstractVariableExpression):
276
+ predDict[func].append_sig(tuple(args))
277
+ elif isinstance(expression, AndExpression):
278
+ self._map_predicates(expression.first, predDict)
279
+ self._map_predicates(expression.second, predDict)
280
+ elif isinstance(expression, AllExpression):
281
+ # collect all the universally quantified variables
282
+ sig = [expression.variable]
283
+ term = expression.term
284
+ while isinstance(term, AllExpression):
285
+ sig.append(term.variable)
286
+ term = term.term
287
+ if isinstance(term, ImpExpression):
288
+ if isinstance(term.first, ApplicationExpression) and isinstance(
289
+ term.second, ApplicationExpression
290
+ ):
291
+ func1, args1 = term.first.uncurry()
292
+ func2, args2 = term.second.uncurry()
293
+ if (
294
+ isinstance(func1, AbstractVariableExpression)
295
+ and isinstance(func2, AbstractVariableExpression)
296
+ and sig == [v.variable for v in args1]
297
+ and sig == [v.variable for v in args2]
298
+ ):
299
+ predDict[func2].append_prop((tuple(sig), term.first))
300
+ predDict[func1].validate_sig_len(sig)
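# Editorial sketch, not part of the NLTK source: the completion axioms can be
# inspected without the external prover9 binary, since assumptions() above only
# rewrites the premises.  This mirrors the first example in the class docstring.
# The helper name is illustrative only.
def _sketch_predicate_completion():
    p1 = Expression.fromstring("walk(Socrates)")
    p2 = Expression.fromstring("(Socrates != Bill)")
    goal = Expression.fromstring("-walk(Bill)")
    cwp = ClosedWorldProver(Prover9Command(goal, [p1, p2]))
    for a in cwp.assumptions():
        print(a)  # adds a completion axiom such as: all z1.(walk(z1) -> (z1 = Socrates))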
301
+
302
+
303
+ class PredHolder:
304
+ """
305
+ This class will be used by a dictionary that will store information
306
+ about predicates to be used by the ``ClosedWorldProver``.
307
+
308
+ The 'signatures' property is a list of tuples defining signatures for
309
+ which the predicate is true. For instance, 'see(john, mary)' would
310
+ result in the signature '(john,mary)' for 'see'.
311
+
312
+ The 'properties' property is a list of pairs such that the first
313
+ element of the pair is a tuple of variables and the second element is an
314
+ expression of those variables that makes the predicate true. For instance,
315
+ 'all x.all y.(see(x,y) -> know(x,y))' would result in "((x,y),('see(x,y)'))"
316
+ for 'know'.
317
+ """
318
+
319
+ def __init__(self):
320
+ self.signatures = []
321
+ self.properties = []
322
+ self.signature_len = None
323
+
324
+ def append_sig(self, new_sig):
325
+ self.validate_sig_len(new_sig)
326
+ self.signatures.append(new_sig)
327
+
328
+ def append_prop(self, new_prop):
329
+ self.validate_sig_len(new_prop[0])
330
+ self.properties.append(new_prop)
331
+
332
+ def validate_sig_len(self, new_sig):
333
+ if self.signature_len is None:
334
+ self.signature_len = len(new_sig)
335
+ elif self.signature_len != len(new_sig):
336
+ raise Exception("Signature lengths do not match")
337
+
338
+ def __str__(self):
339
+ return f"({self.signatures},{self.properties},{self.signature_len})"
340
+
341
+ def __repr__(self):
342
+ return "%s" % self
343
+
344
+
345
+ def closed_domain_demo():
346
+ lexpr = Expression.fromstring
347
+
348
+ p1 = lexpr(r"exists x.walk(x)")
349
+ p2 = lexpr(r"man(Socrates)")
350
+ c = lexpr(r"walk(Socrates)")
351
+ prover = Prover9Command(c, [p1, p2])
352
+ print(prover.prove())
353
+ cdp = ClosedDomainProver(prover)
354
+ print("assumptions:")
355
+ for a in cdp.assumptions():
356
+ print(" ", a)
357
+ print("goal:", cdp.goal())
358
+ print(cdp.prove())
359
+
360
+ p1 = lexpr(r"exists x.walk(x)")
361
+ p2 = lexpr(r"man(Socrates)")
362
+ p3 = lexpr(r"-walk(Bill)")
363
+ c = lexpr(r"walk(Socrates)")
364
+ prover = Prover9Command(c, [p1, p2, p3])
365
+ print(prover.prove())
366
+ cdp = ClosedDomainProver(prover)
367
+ print("assumptions:")
368
+ for a in cdp.assumptions():
369
+ print(" ", a)
370
+ print("goal:", cdp.goal())
371
+ print(cdp.prove())
372
+
373
+ p1 = lexpr(r"exists x.walk(x)")
374
+ p2 = lexpr(r"man(Socrates)")
375
+ p3 = lexpr(r"-walk(Bill)")
376
+ c = lexpr(r"walk(Socrates)")
377
+ prover = Prover9Command(c, [p1, p2, p3])
378
+ print(prover.prove())
379
+ cdp = ClosedDomainProver(prover)
380
+ print("assumptions:")
381
+ for a in cdp.assumptions():
382
+ print(" ", a)
383
+ print("goal:", cdp.goal())
384
+ print(cdp.prove())
385
+
386
+ p1 = lexpr(r"walk(Socrates)")
387
+ p2 = lexpr(r"walk(Bill)")
388
+ c = lexpr(r"all x.walk(x)")
389
+ prover = Prover9Command(c, [p1, p2])
390
+ print(prover.prove())
391
+ cdp = ClosedDomainProver(prover)
392
+ print("assumptions:")
393
+ for a in cdp.assumptions():
394
+ print(" ", a)
395
+ print("goal:", cdp.goal())
396
+ print(cdp.prove())
397
+
398
+ p1 = lexpr(r"girl(mary)")
399
+ p2 = lexpr(r"dog(rover)")
400
+ p3 = lexpr(r"all x.(girl(x) -> -dog(x))")
401
+ p4 = lexpr(r"all x.(dog(x) -> -girl(x))")
402
+ p5 = lexpr(r"chase(mary, rover)")
403
+ c = lexpr(r"exists y.(dog(y) & all x.(girl(x) -> chase(x,y)))")
404
+ prover = Prover9Command(c, [p1, p2, p3, p4, p5])
405
+ print(prover.prove())
406
+ cdp = ClosedDomainProver(prover)
407
+ print("assumptions:")
408
+ for a in cdp.assumptions():
409
+ print(" ", a)
410
+ print("goal:", cdp.goal())
411
+ print(cdp.prove())
412
+
413
+
414
+ def unique_names_demo():
415
+ lexpr = Expression.fromstring
416
+
417
+ p1 = lexpr(r"man(Socrates)")
418
+ p2 = lexpr(r"man(Bill)")
419
+ c = lexpr(r"exists x.exists y.(x != y)")
420
+ prover = Prover9Command(c, [p1, p2])
421
+ print(prover.prove())
422
+ unp = UniqueNamesProver(prover)
423
+ print("assumptions:")
424
+ for a in unp.assumptions():
425
+ print(" ", a)
426
+ print("goal:", unp.goal())
427
+ print(unp.prove())
428
+
429
+ p1 = lexpr(r"all x.(walk(x) -> (x = Socrates))")
430
+ p2 = lexpr(r"Bill = William")
431
+ p3 = lexpr(r"Bill = Billy")
432
+ c = lexpr(r"-walk(William)")
433
+ prover = Prover9Command(c, [p1, p2, p3])
434
+ print(prover.prove())
435
+ unp = UniqueNamesProver(prover)
436
+ print("assumptions:")
437
+ for a in unp.assumptions():
438
+ print(" ", a)
439
+ print("goal:", unp.goal())
440
+ print(unp.prove())
441
+
442
+
443
+ def closed_world_demo():
444
+ lexpr = Expression.fromstring
445
+
446
+ p1 = lexpr(r"walk(Socrates)")
447
+ p2 = lexpr(r"(Socrates != Bill)")
448
+ c = lexpr(r"-walk(Bill)")
449
+ prover = Prover9Command(c, [p1, p2])
450
+ print(prover.prove())
451
+ cwp = ClosedWorldProver(prover)
452
+ print("assumptions:")
453
+ for a in cwp.assumptions():
454
+ print(" ", a)
455
+ print("goal:", cwp.goal())
456
+ print(cwp.prove())
457
+
458
+ p1 = lexpr(r"see(Socrates, John)")
459
+ p2 = lexpr(r"see(John, Mary)")
460
+ p3 = lexpr(r"(Socrates != John)")
461
+ p4 = lexpr(r"(John != Mary)")
462
+ c = lexpr(r"-see(Socrates, Mary)")
463
+ prover = Prover9Command(c, [p1, p2, p3, p4])
464
+ print(prover.prove())
465
+ cwp = ClosedWorldProver(prover)
466
+ print("assumptions:")
467
+ for a in cwp.assumptions():
468
+ print(" ", a)
469
+ print("goal:", cwp.goal())
470
+ print(cwp.prove())
471
+
472
+ p1 = lexpr(r"all x.(ostrich(x) -> bird(x))")
473
+ p2 = lexpr(r"bird(Tweety)")
474
+ p3 = lexpr(r"-ostrich(Sam)")
475
+ p4 = lexpr(r"Sam != Tweety")
476
+ c = lexpr(r"-bird(Sam)")
477
+ prover = Prover9Command(c, [p1, p2, p3, p4])
478
+ print(prover.prove())
479
+ cwp = ClosedWorldProver(prover)
480
+ print("assumptions:")
481
+ for a in cwp.assumptions():
482
+ print(" ", a)
483
+ print("goal:", cwp.goal())
484
+ print(cwp.prove())
485
+
486
+
487
+ def combination_prover_demo():
488
+ lexpr = Expression.fromstring
489
+
490
+ p1 = lexpr(r"see(Socrates, John)")
491
+ p2 = lexpr(r"see(John, Mary)")
492
+ c = lexpr(r"-see(Socrates, Mary)")
493
+ prover = Prover9Command(c, [p1, p2])
494
+ print(prover.prove())
495
+ command = ClosedDomainProver(UniqueNamesProver(ClosedWorldProver(prover)))
496
+ for a in command.assumptions():
497
+ print(a)
498
+ print(command.prove())
499
+
500
+
501
+ def default_reasoning_demo():
502
+ lexpr = Expression.fromstring
503
+
504
+ premises = []
505
+
506
+ # define taxonomy
507
+ premises.append(lexpr(r"all x.(elephant(x) -> animal(x))"))
508
+ premises.append(lexpr(r"all x.(bird(x) -> animal(x))"))
509
+ premises.append(lexpr(r"all x.(dove(x) -> bird(x))"))
510
+ premises.append(lexpr(r"all x.(ostrich(x) -> bird(x))"))
511
+ premises.append(lexpr(r"all x.(flying_ostrich(x) -> ostrich(x))"))
512
+
513
+ # default properties
514
+ premises.append(
515
+ lexpr(r"all x.((animal(x) & -Ab1(x)) -> -fly(x))")
516
+ ) # normal animals don't fly
517
+ premises.append(
518
+ lexpr(r"all x.((bird(x) & -Ab2(x)) -> fly(x))")
519
+ ) # normal birds fly
520
+ premises.append(
521
+ lexpr(r"all x.((ostrich(x) & -Ab3(x)) -> -fly(x))")
522
+ ) # normal ostriches don't fly
523
+
524
+ # specify abnormal entities
525
+ premises.append(lexpr(r"all x.(bird(x) -> Ab1(x))")) # flight
526
+ premises.append(lexpr(r"all x.(ostrich(x) -> Ab2(x))")) # non-flying bird
527
+ premises.append(lexpr(r"all x.(flying_ostrich(x) -> Ab3(x))")) # flying ostrich
528
+
529
+ # define entities
530
+ premises.append(lexpr(r"elephant(E)"))
531
+ premises.append(lexpr(r"dove(D)"))
532
+ premises.append(lexpr(r"ostrich(O)"))
533
+
534
+ # print the assumptions
535
+ prover = Prover9Command(None, premises)
536
+ command = UniqueNamesProver(ClosedWorldProver(prover))
537
+ for a in command.assumptions():
538
+ print(a)
539
+
540
+ print_proof("-fly(E)", premises)
541
+ print_proof("fly(D)", premises)
542
+ print_proof("-fly(O)", premises)
543
+
544
+
545
+ def print_proof(goal, premises):
546
+ lexpr = Expression.fromstring
547
+ prover = Prover9Command(lexpr(goal), premises)
548
+ command = UniqueNamesProver(ClosedWorldProver(prover))
549
+ print(goal, prover.prove(), command.prove())
550
+
551
+
552
+ def demo():
553
+ closed_domain_demo()
554
+ unique_names_demo()
555
+ closed_world_demo()
556
+ combination_prover_demo()
557
+ default_reasoning_demo()
558
+
559
+
560
+ if __name__ == "__main__":
561
+ demo()
llmeval-env/lib/python3.10/site-packages/nltk/inference/prover9.py ADDED
@@ -0,0 +1,508 @@
1
+ # Natural Language Toolkit: Interface to the Prover9 Theorem Prover
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Dan Garrette <[email protected]>
5
+ # Ewan Klein <[email protected]>
6
+ #
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+ """
10
+ A theorem prover that makes use of the external 'Prover9' package.
11
+ """
12
+
13
+ import os
14
+ import subprocess
15
+
16
+ import nltk
17
+ from nltk.inference.api import BaseProverCommand, Prover
18
+ from nltk.sem.logic import (
19
+ AllExpression,
20
+ AndExpression,
21
+ EqualityExpression,
22
+ ExistsExpression,
23
+ Expression,
24
+ IffExpression,
25
+ ImpExpression,
26
+ NegatedExpression,
27
+ OrExpression,
28
+ )
29
+
30
+ #
31
+ # The following is not yet used. A return code of 2 is actually realized as 512.
32
+ #
33
+ p9_return_codes = {
34
+ 0: True,
35
+ 1: "(FATAL)", # A fatal error occurred (user's syntax error).
36
+ 2: False, # (SOS_EMPTY) Prover9 ran out of things to do
37
+ # (sos list exhausted).
38
+ 3: "(MAX_MEGS)", # The max_megs (memory limit) parameter was exceeded.
39
+ 4: "(MAX_SECONDS)", # The max_seconds parameter was exceeded.
40
+ 5: "(MAX_GIVEN)", # The max_given parameter was exceeded.
41
+ 6: "(MAX_KEPT)", # The max_kept parameter was exceeded.
42
+ 7: "(ACTION)", # A Prover9 action terminated the search.
43
+ 101: "(SIGSEGV)", # Prover9 crashed, most probably due to a bug.
44
+ }
45
+
46
+
47
+ class Prover9CommandParent:
48
+ """
49
+ A common base class used by both ``Prover9Command`` and ``MaceCommand``,
50
+ which is responsible for maintaining a goal and a set of assumptions,
51
+ and generating prover9-style input files from them.
52
+ """
53
+
54
+ def print_assumptions(self, output_format="nltk"):
55
+ """
56
+ Print the list of the current assumptions.
57
+ """
58
+ if output_format.lower() == "nltk":
59
+ for a in self.assumptions():
60
+ print(a)
61
+ elif output_format.lower() == "prover9":
62
+ for a in convert_to_prover9(self.assumptions()):
63
+ print(a)
64
+ else:
65
+ raise NameError(
66
+ "Unrecognized value for 'output_format': %s" % output_format
67
+ )
68
+
69
+
70
+ class Prover9Command(Prover9CommandParent, BaseProverCommand):
71
+ """
72
+ A ``ProverCommand`` specific to the ``Prover9`` prover. It contains
73
+ a print_assumptions() method that is used to print the list
74
+ of assumptions in multiple formats.
75
+ """
76
+
77
+ def __init__(self, goal=None, assumptions=None, timeout=60, prover=None):
78
+ """
79
+ :param goal: Input expression to prove
80
+ :type goal: sem.Expression
81
+ :param assumptions: Input expressions to use as assumptions in
82
+ the proof.
83
+ :type assumptions: list(sem.Expression)
84
+ :param timeout: number of seconds before timeout; set to 0 for
85
+ no timeout.
86
+ :type timeout: int
87
+ :param prover: a prover. If not set, one will be created.
88
+ :type prover: Prover9
89
+ """
90
+ if not assumptions:
91
+ assumptions = []
92
+
93
+ if prover is not None:
94
+ assert isinstance(prover, Prover9)
95
+ else:
96
+ prover = Prover9(timeout)
97
+
98
+ BaseProverCommand.__init__(self, prover, goal, assumptions)
99
+
100
+ def decorate_proof(self, proof_string, simplify=True):
101
+ """
102
+ :see BaseProverCommand.decorate_proof()
103
+ """
104
+ if simplify:
105
+ return self._prover._call_prooftrans(proof_string, ["striplabels"])[
106
+ 0
107
+ ].rstrip()
108
+ else:
109
+ return proof_string.rstrip()
110
+
111
+
112
+ class Prover9Parent:
113
+ """
114
+ A common class extended by both ``Prover9`` and ``Mace <mace.Mace>``.
115
+ It contains the functionality required to convert NLTK-style
116
+ expressions into Prover9-style expressions.
117
+ """
118
+
119
+ _binary_location = None
120
+
121
+ def config_prover9(self, binary_location, verbose=False):
122
+ if binary_location is None:
123
+ self._binary_location = None
124
+ self._prover9_bin = None
125
+ else:
126
+ name = "prover9"
127
+ self._prover9_bin = nltk.internals.find_binary(
128
+ name,
129
+ path_to_bin=binary_location,
130
+ env_vars=["PROVER9"],
131
+ url="https://www.cs.unm.edu/~mccune/prover9/",
132
+ binary_names=[name, name + ".exe"],
133
+ verbose=verbose,
134
+ )
135
+ self._binary_location = self._prover9_bin.rsplit(os.path.sep, 1)
136
+
137
+ def prover9_input(self, goal, assumptions):
138
+ """
139
+ :return: The input string that should be provided to the
140
+ prover9 binary. This string is formed based on the goal,
141
+ assumptions, and timeout value of this object.
142
+ """
143
+ s = ""
144
+
145
+ if assumptions:
146
+ s += "formulas(assumptions).\n"
147
+ for p9_assumption in convert_to_prover9(assumptions):
148
+ s += " %s.\n" % p9_assumption
149
+ s += "end_of_list.\n\n"
150
+
151
+ if goal:
152
+ s += "formulas(goals).\n"
153
+ s += " %s.\n" % convert_to_prover9(goal)
154
+ s += "end_of_list.\n\n"
155
+
156
+ return s
157
+
158
+ def binary_locations(self):
159
+ """
160
+ A list of directories that should be searched for the prover9
161
+ executables. This list is used by ``config_prover9`` when searching
162
+ for the prover9 executables.
163
+ """
164
+ return [
165
+ "/usr/local/bin/prover9",
166
+ "/usr/local/bin/prover9/bin",
167
+ "/usr/local/bin",
168
+ "/usr/bin",
169
+ "/usr/local/prover9",
170
+ "/usr/local/share/prover9",
171
+ ]
172
+
173
+ def _find_binary(self, name, verbose=False):
174
+ binary_locations = self.binary_locations()
175
+ if self._binary_location is not None:
176
+ binary_locations += [self._binary_location]
177
+ return nltk.internals.find_binary(
178
+ name,
179
+ searchpath=binary_locations,
180
+ env_vars=["PROVER9"],
181
+ url="https://www.cs.unm.edu/~mccune/prover9/",
182
+ binary_names=[name, name + ".exe"],
183
+ verbose=verbose,
184
+ )
185
+
186
+ def _call(self, input_str, binary, args=[], verbose=False):
187
+ """
188
+ Call the binary with the given input.
189
+
190
+ :param input_str: A string whose contents are used as stdin.
191
+ :param binary: The location of the binary to call
192
+ :param args: A list of command-line arguments.
193
+ :return: A tuple (stdout, returncode)
194
+ :see: ``config_prover9``
195
+ """
196
+ if verbose:
197
+ print("Calling:", binary)
198
+ print("Args:", args)
199
+ print("Input:\n", input_str, "\n")
200
+
201
+ # Call prover9 via a subprocess
202
+ cmd = [binary] + args
203
+ try:
204
+ input_str = input_str.encode("utf8")
205
+ except AttributeError:
206
+ pass
207
+ p = subprocess.Popen(
208
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE
209
+ )
210
+ (stdout, stderr) = p.communicate(input=input_str)
211
+
212
+ if verbose:
213
+ print("Return code:", p.returncode)
214
+ if stdout:
215
+ print("stdout:\n", stdout, "\n")
216
+ if stderr:
217
+ print("stderr:\n", stderr, "\n")
218
+
219
+ return (stdout.decode("utf-8"), p.returncode)
220
+
221
+
222
+ def convert_to_prover9(input):
223
+ """
224
+ Convert a ``logic.Expression`` to Prover9 format.
225
+ """
226
+ if isinstance(input, list):
227
+ result = []
228
+ for s in input:
229
+ try:
230
+ result.append(_convert_to_prover9(s.simplify()))
231
+ except:
232
+ print("input %s cannot be converted to Prover9 input syntax" % input)
233
+ raise
234
+ return result
235
+ else:
236
+ try:
237
+ return _convert_to_prover9(input.simplify())
238
+ except:
239
+ print("input %s cannot be converted to Prover9 input syntax" % input)
240
+ raise
241
+
242
+
243
+ def _convert_to_prover9(expression):
244
+ """
245
+ Convert ``logic.Expression`` to Prover9 formatted string.
246
+ """
247
+ if isinstance(expression, ExistsExpression):
248
+ return (
249
+ "exists "
250
+ + str(expression.variable)
251
+ + " "
252
+ + _convert_to_prover9(expression.term)
253
+ )
254
+ elif isinstance(expression, AllExpression):
255
+ return (
256
+ "all "
257
+ + str(expression.variable)
258
+ + " "
259
+ + _convert_to_prover9(expression.term)
260
+ )
261
+ elif isinstance(expression, NegatedExpression):
262
+ return "-(" + _convert_to_prover9(expression.term) + ")"
263
+ elif isinstance(expression, AndExpression):
264
+ return (
265
+ "("
266
+ + _convert_to_prover9(expression.first)
267
+ + " & "
268
+ + _convert_to_prover9(expression.second)
269
+ + ")"
270
+ )
271
+ elif isinstance(expression, OrExpression):
272
+ return (
273
+ "("
274
+ + _convert_to_prover9(expression.first)
275
+ + " | "
276
+ + _convert_to_prover9(expression.second)
277
+ + ")"
278
+ )
279
+ elif isinstance(expression, ImpExpression):
280
+ return (
281
+ "("
282
+ + _convert_to_prover9(expression.first)
283
+ + " -> "
284
+ + _convert_to_prover9(expression.second)
285
+ + ")"
286
+ )
287
+ elif isinstance(expression, IffExpression):
288
+ return (
289
+ "("
290
+ + _convert_to_prover9(expression.first)
291
+ + " <-> "
292
+ + _convert_to_prover9(expression.second)
293
+ + ")"
294
+ )
295
+ elif isinstance(expression, EqualityExpression):
296
+ return (
297
+ "("
298
+ + _convert_to_prover9(expression.first)
299
+ + " = "
300
+ + _convert_to_prover9(expression.second)
301
+ + ")"
302
+ )
303
+ else:
304
+ return str(expression)
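# Editorial sketch, not part of the NLTK source: the conversion above is a
# plain string rewrite, so it can be tried without the prover9 binary.  The
# helper name is illustrative only.
def _sketch_conversion():
    e = Expression.fromstring("exists x.(man(x) & walks(x))")
    print(convert_to_prover9(e))  # exists x (man(x) & walks(x))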
305
+
306
+
307
+ class Prover9(Prover9Parent, Prover):
308
+ _prover9_bin = None
309
+ _prooftrans_bin = None
310
+
311
+ def __init__(self, timeout=60):
312
+ self._timeout = timeout
313
+ """The timeout value for prover9. If a proof can not be found
314
+ in this amount of time, then prover9 will return false.
315
+ (Use 0 for no timeout.)"""
316
+
317
+ def _prove(self, goal=None, assumptions=None, verbose=False):
318
+ """
319
+ Use Prover9 to prove a theorem.
320
+ :return: A pair whose first element is a boolean indicating if the
321
+ proof was successful (i.e. returns value of 0) and whose second element
322
+ is the output of the prover.
323
+ """
324
+ if not assumptions:
325
+ assumptions = []
326
+
327
+ stdout, returncode = self._call_prover9(
328
+ self.prover9_input(goal, assumptions), verbose=verbose
329
+ )
330
+ return (returncode == 0, stdout)
331
+
332
+ def prover9_input(self, goal, assumptions):
333
+ """
334
+ :see: Prover9Parent.prover9_input
335
+ """
336
+ s = "clear(auto_denials).\n" # only one proof required
337
+ return s + Prover9Parent.prover9_input(self, goal, assumptions)
338
+
339
+ def _call_prover9(self, input_str, args=[], verbose=False):
340
+ """
341
+ Call the ``prover9`` binary with the given input.
342
+
343
+ :param input_str: A string whose contents are used as stdin.
344
+ :param args: A list of command-line arguments.
345
+ :return: A tuple (stdout, returncode)
346
+ :see: ``config_prover9``
347
+ """
348
+ if self._prover9_bin is None:
349
+ self._prover9_bin = self._find_binary("prover9", verbose)
350
+
351
+ updated_input_str = ""
352
+ if self._timeout > 0:
353
+ updated_input_str += "assign(max_seconds, %d).\n\n" % self._timeout
354
+ updated_input_str += input_str
355
+
356
+ stdout, returncode = self._call(
357
+ updated_input_str, self._prover9_bin, args, verbose
358
+ )
359
+
360
+ if returncode not in [0, 2]:
361
+ errormsgprefix = "%%ERROR:"
362
+ if errormsgprefix in stdout:
363
+ msgstart = stdout.index(errormsgprefix)
364
+ errormsg = stdout[msgstart:].strip()
365
+ else:
366
+ errormsg = None
367
+ if returncode in [3, 4, 5, 6]:
368
+ raise Prover9LimitExceededException(returncode, errormsg)
369
+ else:
370
+ raise Prover9FatalException(returncode, errormsg)
371
+
372
+ return stdout, returncode
373
+
374
+ def _call_prooftrans(self, input_str, args=[], verbose=False):
375
+ """
376
+ Call the ``prooftrans`` binary with the given input.
377
+
378
+ :param input_str: A string whose contents are used as stdin.
379
+ :param args: A list of command-line arguments.
380
+ :return: A tuple (stdout, returncode)
381
+ :see: ``config_prover9``
382
+ """
383
+ if self._prooftrans_bin is None:
384
+ self._prooftrans_bin = self._find_binary("prooftrans", verbose)
385
+
386
+ return self._call(input_str, self._prooftrans_bin, args, verbose)
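# Editorial sketch, not part of the NLTK source: prover9_input() only builds
# the input string, so the generated Prover9 syntax can be inspected without
# the prover9 binary being installed.  The helper name is illustrative only.
def _sketch_prover9_input():
    goal = Expression.fromstring("mortal(Socrates)")
    assumptions = [Expression.fromstring("all x.(man(x) -> mortal(x))")]
    print(Prover9(timeout=10).prover9_input(goal, assumptions))
    # Roughly:
    #   clear(auto_denials).
    #   formulas(assumptions).
    #       all x (man(x) -> mortal(x)).
    #   end_of_list.
    #
    #   formulas(goals).
    #       mortal(Socrates).
    #   end_of_list.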
387
+
388
+
389
+ class Prover9Exception(Exception):
390
+ def __init__(self, returncode, message):
391
+ msg = p9_return_codes[returncode]
392
+ if message:
393
+ msg += "\n%s" % message
394
+ Exception.__init__(self, msg)
395
+
396
+
397
+ class Prover9FatalException(Prover9Exception):
398
+ pass
399
+
400
+
401
+ class Prover9LimitExceededException(Prover9Exception):
402
+ pass
403
+
404
+
405
+ ######################################################################
406
+ # { Tests and Demos
407
+ ######################################################################
408
+
409
+
410
+ def test_config():
411
+
412
+ a = Expression.fromstring("(walk(j) & sing(j))")
413
+ g = Expression.fromstring("walk(j)")
414
+ p = Prover9Command(g, assumptions=[a])
415
+ p._executable_path = None
416
+ p.prover9_search = []
417
+ p.prove()
418
+ # config_prover9('/usr/local/bin')
419
+ print(p.prove())
420
+ print(p.proof())
421
+
422
+
423
+ def test_convert_to_prover9(expr):
424
+ """
425
+ Test that parsing works OK.
426
+ """
427
+ for t in expr:
428
+ e = Expression.fromstring(t)
429
+ print(convert_to_prover9(e))
430
+
431
+
432
+ def test_prove(arguments):
433
+ """
434
+ Try some proofs and exhibit the results.
435
+ """
436
+ for (goal, assumptions) in arguments:
437
+ g = Expression.fromstring(goal)
438
+ alist = [Expression.fromstring(a) for a in assumptions]
439
+ p = Prover9Command(g, assumptions=alist).prove()
440
+ for a in alist:
441
+ print(" %s" % a)
442
+ print(f"|- {g}: {p}\n")
443
+
444
+
445
+ arguments = [
446
+ ("(man(x) <-> (not (not man(x))))", []),
447
+ ("(not (man(x) & (not man(x))))", []),
448
+ ("(man(x) | (not man(x)))", []),
449
+ ("(man(x) & (not man(x)))", []),
450
+ ("(man(x) -> man(x))", []),
451
+ ("(not (man(x) & (not man(x))))", []),
452
+ ("(man(x) | (not man(x)))", []),
453
+ ("(man(x) -> man(x))", []),
454
+ ("(man(x) <-> man(x))", []),
455
+ ("(not (man(x) <-> (not man(x))))", []),
456
+ ("mortal(Socrates)", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]),
457
+ ("((all x.(man(x) -> walks(x)) & man(Socrates)) -> some y.walks(y))", []),
458
+ ("(all x.man(x) -> all x.man(x))", []),
459
+ ("some x.all y.sees(x,y)", []),
460
+ (
461
+ "some e3.(walk(e3) & subj(e3, mary))",
462
+ [
463
+ "some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))"
464
+ ],
465
+ ),
466
+ (
467
+ "some x e1.(see(e1) & subj(e1, x) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))",
468
+ [
469
+ "some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))"
470
+ ],
471
+ ),
472
+ ]
473
+
474
+ expressions = [
475
+ r"some x y.sees(x,y)",
476
+ r"some x.(man(x) & walks(x))",
477
+ r"\x.(man(x) & walks(x))",
478
+ r"\x y.sees(x,y)",
479
+ r"walks(john)",
480
+ r"\x.big(x, \y.mouse(y))",
481
+ r"(walks(x) & (runs(x) & (threes(x) & fours(x))))",
482
+ r"(walks(x) -> runs(x))",
483
+ r"some x.(PRO(x) & sees(John, x))",
484
+ r"some x.(man(x) & (not walks(x)))",
485
+ r"all x.(man(x) -> walks(x))",
486
+ ]
487
+
488
+
489
+ def spacer(num=45):
490
+ print("-" * num)
491
+
492
+
493
+ def demo():
494
+ print("Testing configuration")
495
+ spacer()
496
+ test_config()
497
+ print()
498
+ print("Testing conversion to Prover9 format")
499
+ spacer()
500
+ test_convert_to_prover9(expressions)
501
+ print()
502
+ print("Testing proofs")
503
+ spacer()
504
+ test_prove(arguments)
505
+
506
+
507
+ if __name__ == "__main__":
508
+ demo()
llmeval-env/lib/python3.10/site-packages/nltk/inference/resolution.py ADDED
@@ -0,0 +1,759 @@
1
+ # Natural Language Toolkit: First-order Resolution-based Theorem Prover
2
+ #
3
+ # Author: Dan Garrette <[email protected]>
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Module for a resolution-based first-order theorem prover.
11
+ """
12
+
13
+ import operator
14
+ from collections import defaultdict
15
+ from functools import reduce
16
+
17
+ from nltk.inference.api import BaseProverCommand, Prover
18
+ from nltk.sem import skolemize
19
+ from nltk.sem.logic import (
20
+ AndExpression,
21
+ ApplicationExpression,
22
+ EqualityExpression,
23
+ Expression,
24
+ IndividualVariableExpression,
25
+ NegatedExpression,
26
+ OrExpression,
27
+ Variable,
28
+ VariableExpression,
29
+ is_indvar,
30
+ unique_variable,
31
+ )
32
+
33
+
34
+ class ProverParseError(Exception):
35
+ pass
36
+
37
+
38
+ class ResolutionProver(Prover):
39
+ ANSWER_KEY = "ANSWER"
40
+ _assume_false = True
41
+
42
+ def _prove(self, goal=None, assumptions=None, verbose=False):
43
+ """
44
+ :param goal: Input expression to prove
45
+ :type goal: sem.Expression
46
+ :param assumptions: Input expressions to use as assumptions in the proof
47
+ :type assumptions: list(sem.Expression)
48
+ """
49
+ if not assumptions:
50
+ assumptions = []
51
+
52
+ result = None
53
+ try:
54
+ clauses = []
55
+ if goal:
56
+ clauses.extend(clausify(-goal))
57
+ for a in assumptions:
58
+ clauses.extend(clausify(a))
59
+ result, clauses = self._attempt_proof(clauses)
60
+ if verbose:
61
+ print(ResolutionProverCommand._decorate_clauses(clauses))
62
+ except RuntimeError as e:
63
+ if self._assume_false and str(e).startswith(
64
+ "maximum recursion depth exceeded"
65
+ ):
66
+ result = False
67
+ clauses = []
68
+ else:
69
+ if verbose:
70
+ print(e)
71
+ else:
72
+ raise e
73
+ return (result, clauses)
74
+
75
+ def _attempt_proof(self, clauses):
76
+ # map indices to lists of indices, to store attempted unifications
77
+ tried = defaultdict(list)
78
+
79
+ i = 0
80
+ while i < len(clauses):
81
+ if not clauses[i].is_tautology():
82
+ # since we try clauses in order, we should start after the last
83
+ # index tried
84
+ if tried[i]:
85
+ j = tried[i][-1] + 1
86
+ else:
87
+ j = i + 1 # nothing tried yet for 'i', so start with the next
88
+
89
+ while j < len(clauses):
90
+ # don't: 1) unify a clause with itself,
91
+ # 2) use tautologies
92
+ if i != j and j and not clauses[j].is_tautology():
93
+ tried[i].append(j)
94
+ newclauses = clauses[i].unify(clauses[j])
95
+ if newclauses:
96
+ for newclause in newclauses:
97
+ newclause._parents = (i + 1, j + 1)
98
+ clauses.append(newclause)
99
+ if not len(newclause): # if there's an empty clause
100
+ return (True, clauses)
101
+ i = -1 # since we added a new clause, restart from the top
102
+ break
103
+ j += 1
104
+ i += 1
105
+ return (False, clauses)
106
+
107
+
108
+ class ResolutionProverCommand(BaseProverCommand):
109
+ def __init__(self, goal=None, assumptions=None, prover=None):
110
+ """
111
+ :param goal: Input expression to prove
112
+ :type goal: sem.Expression
113
+ :param assumptions: Input expressions to use as assumptions in
114
+ the proof.
115
+ :type assumptions: list(sem.Expression)
116
+ """
117
+ if prover is not None:
118
+ assert isinstance(prover, ResolutionProver)
119
+ else:
120
+ prover = ResolutionProver()
121
+
122
+ BaseProverCommand.__init__(self, prover, goal, assumptions)
123
+ self._clauses = None
124
+
125
+ def prove(self, verbose=False):
126
+ """
127
+ Perform the actual proof. Store the result to prevent unnecessary
128
+ re-proving.
129
+ """
130
+ if self._result is None:
131
+ self._result, clauses = self._prover._prove(
132
+ self.goal(), self.assumptions(), verbose
133
+ )
134
+ self._clauses = clauses
135
+ self._proof = ResolutionProverCommand._decorate_clauses(clauses)
136
+ return self._result
137
+
138
+ def find_answers(self, verbose=False):
139
+ self.prove(verbose)
140
+
141
+ answers = set()
142
+ answer_ex = VariableExpression(Variable(ResolutionProver.ANSWER_KEY))
143
+ for clause in self._clauses:
144
+ for term in clause:
145
+ if (
146
+ isinstance(term, ApplicationExpression)
147
+ and term.function == answer_ex
148
+ and not isinstance(term.argument, IndividualVariableExpression)
149
+ ):
150
+ answers.add(term.argument)
151
+ return answers
152
+
153
+ @staticmethod
154
+ def _decorate_clauses(clauses):
155
+ """
156
+ Decorate the proof output.
157
+ """
158
+ out = ""
159
+ max_clause_len = max(len(str(clause)) for clause in clauses)
160
+ max_seq_len = len(str(len(clauses)))
161
+ for i in range(len(clauses)):
162
+ parents = "A"
163
+ taut = ""
164
+ if clauses[i].is_tautology():
165
+ taut = "Tautology"
166
+ if clauses[i]._parents:
167
+ parents = str(clauses[i]._parents)
168
+ parents = " " * (max_clause_len - len(str(clauses[i])) + 1) + parents
169
+ seq = " " * (max_seq_len - len(str(i + 1))) + str(i + 1)
170
+ out += f"[{seq}] {clauses[i]} {parents} {taut}\n"
171
+ return out
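# Editorial sketch, not part of the NLTK source: unlike the Prover9 interface,
# this prover is pure Python, so a proof can be run without any external
# binary.  The helper name is illustrative only.
def _sketch_resolution_proof():
    goal = Expression.fromstring("mortal(Socrates)")
    premises = [
        Expression.fromstring("all x.(man(x) -> mortal(x))"),
        Expression.fromstring("man(Socrates)"),
    ]
    command = ResolutionProverCommand(goal, premises)
    print(command.prove())  # True
    print(command.proof())  # numbered clauses ending in the empty clause {}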
172
+
173
+
174
+ class Clause(list):
175
+ def __init__(self, data):
176
+ list.__init__(self, data)
177
+ self._is_tautology = None
178
+ self._parents = None
179
+
180
+ def unify(self, other, bindings=None, used=None, skipped=None, debug=False):
181
+ """
182
+ Attempt to unify this Clause with the other, returning a list of
183
+ resulting, unified, Clauses.
184
+
185
+ :param other: ``Clause`` with which to unify
186
+ :param bindings: ``BindingDict`` containing bindings that should be used
187
+ during the unification
188
+ :param used: tuple of two lists of atoms. The first lists the
189
+ atoms from 'self' that were successfully unified with atoms from
190
+ 'other'. The second lists the atoms from 'other' that were successfully
191
+ unified with atoms from 'self'.
192
+ :param skipped: tuple of two ``Clause`` objects. The first is a list of all
193
+ the atoms from the 'self' Clause that have not been unified with
194
+ anything on the path. The second is the same thing for the 'other' Clause.
195
+ :param debug: bool indicating whether debug statements should print
196
+ :return: list containing all the resulting ``Clause`` objects that could be
197
+ obtained by unification
198
+ """
199
+ if bindings is None:
200
+ bindings = BindingDict()
201
+ if used is None:
202
+ used = ([], [])
203
+ if skipped is None:
204
+ skipped = ([], [])
205
+ if isinstance(debug, bool):
206
+ debug = DebugObject(debug)
207
+
208
+ newclauses = _iterate_first(
209
+ self, other, bindings, used, skipped, _complete_unify_path, debug
210
+ )
211
+
212
+ # remove subsumed clauses. make a list of all indices of subsumed
213
+ # clauses, and then remove them from the list
214
+ subsumed = []
215
+ for i, c1 in enumerate(newclauses):
216
+ if i not in subsumed:
217
+ for j, c2 in enumerate(newclauses):
218
+ if i != j and j not in subsumed and c1.subsumes(c2):
219
+ subsumed.append(j)
220
+ result = []
221
+ for i in range(len(newclauses)):
222
+ if i not in subsumed:
223
+ result.append(newclauses[i])
224
+
225
+ return result
226
+
227
+ def isSubsetOf(self, other):
228
+ """
229
+ Return True iff every term in 'self' is a term in 'other'.
230
+
231
+ :param other: ``Clause``
232
+ :return: bool
233
+ """
234
+ for a in self:
235
+ if a not in other:
236
+ return False
237
+ return True
238
+
239
+ def subsumes(self, other):
240
+ """
241
+ Return True iff 'self' subsumes 'other', that is, if there is a
242
+ substitution such that every term in 'self' can be unified with a term
243
+ in 'other'.
244
+
245
+ :param other: ``Clause``
246
+ :return: bool
247
+ """
248
+ negatedother = []
249
+ for atom in other:
250
+ if isinstance(atom, NegatedExpression):
251
+ negatedother.append(atom.term)
252
+ else:
253
+ negatedother.append(-atom)
254
+
255
+ negatedotherClause = Clause(negatedother)
256
+
257
+ bindings = BindingDict()
258
+ used = ([], [])
259
+ skipped = ([], [])
260
+ debug = DebugObject(False)
261
+
262
+ return (
263
+ len(
264
+ _iterate_first(
265
+ self,
266
+ negatedotherClause,
267
+ bindings,
268
+ used,
269
+ skipped,
270
+ _subsumes_finalize,
271
+ debug,
272
+ )
273
+ )
274
+ > 0
275
+ )
276
+
277
+ def __getslice__(self, start, end):
278
+ return Clause(list.__getslice__(self, start, end))
279
+
280
+ def __sub__(self, other):
281
+ return Clause([a for a in self if a not in other])
282
+
283
+ def __add__(self, other):
284
+ return Clause(list.__add__(self, other))
285
+
286
+ def is_tautology(self):
287
+ """
288
+ Self is a tautology if it contains ground terms P and -P. The ground
289
+ term, P, must be an exact match, i.e., not using unification.
290
+ """
291
+ if self._is_tautology is not None:
292
+ return self._is_tautology
293
+ for i, a in enumerate(self):
294
+ if not isinstance(a, EqualityExpression):
295
+ j = len(self) - 1
296
+ while j > i:
297
+ b = self[j]
298
+ if isinstance(a, NegatedExpression):
299
+ if a.term == b:
300
+ self._is_tautology = True
301
+ return True
302
+ elif isinstance(b, NegatedExpression):
303
+ if a == b.term:
304
+ self._is_tautology = True
305
+ return True
306
+ j -= 1
307
+ self._is_tautology = False
308
+ return False
309
+
310
+ def free(self):
311
+ return reduce(operator.or_, ((atom.free() | atom.constants()) for atom in self))
312
+
313
+ def replace(self, variable, expression):
314
+ """
315
+ Replace every instance of variable with expression across every atom
316
+ in the clause
317
+
318
+ :param variable: ``Variable``
319
+ :param expression: ``Expression``
320
+ """
321
+ return Clause([atom.replace(variable, expression) for atom in self])
322
+
323
+ def substitute_bindings(self, bindings):
324
+ """
325
+ Replace every binding
326
+
327
+ :param bindings: A list of tuples mapping Variable Expressions to the
328
+ Expressions to which they are bound.
329
+ :return: ``Clause``
330
+ """
331
+ return Clause([atom.substitute_bindings(bindings) for atom in self])
332
+
333
+ def __str__(self):
334
+ return "{" + ", ".join("%s" % item for item in self) + "}"
335
+
336
+ def __repr__(self):
337
+ return "%s" % self
338
+
339
+
340
+ def _iterate_first(first, second, bindings, used, skipped, finalize_method, debug):
341
+ """
342
+ This method facilitates movement through the terms of 'self'
343
+ """
344
+ debug.line(f"unify({first},{second}) {bindings}")
345
+
346
+ if not len(first) or not len(second): # if no more recursions can be performed
347
+ return finalize_method(first, second, bindings, used, skipped, debug)
348
+ else:
349
+ # explore this 'self' atom
350
+ result = _iterate_second(
351
+ first, second, bindings, used, skipped, finalize_method, debug + 1
352
+ )
353
+
354
+ # skip this possible 'self' atom
355
+ newskipped = (skipped[0] + [first[0]], skipped[1])
356
+ result += _iterate_first(
357
+ first[1:], second, bindings, used, newskipped, finalize_method, debug + 1
358
+ )
359
+
360
+ try:
361
+ newbindings, newused, unused = _unify_terms(
362
+ first[0], second[0], bindings, used
363
+ )
364
+ # Unification found, so progress with this line of unification
365
+ # put skipped and unused terms back into play for later unification.
366
+ newfirst = first[1:] + skipped[0] + unused[0]
367
+ newsecond = second[1:] + skipped[1] + unused[1]
368
+ result += _iterate_first(
369
+ newfirst,
370
+ newsecond,
371
+ newbindings,
372
+ newused,
373
+ ([], []),
374
+ finalize_method,
375
+ debug + 1,
376
+ )
377
+ except BindingException:
378
+ # the atoms could not be unified,
379
+ pass
380
+
381
+ return result
382
+
383
+
384
+ def _iterate_second(first, second, bindings, used, skipped, finalize_method, debug):
385
+ """
386
+ This method facilitates movement through the terms of 'other'
387
+ """
388
+ debug.line(f"unify({first},{second}) {bindings}")
389
+
390
+ if not len(first) or not len(second): # if no more recursions can be performed
391
+ return finalize_method(first, second, bindings, used, skipped, debug)
392
+ else:
393
+ # skip this possible pairing and move to the next
394
+ newskipped = (skipped[0], skipped[1] + [second[0]])
395
+ result = _iterate_second(
396
+ first, second[1:], bindings, used, newskipped, finalize_method, debug + 1
397
+ )
398
+
399
+ try:
400
+ newbindings, newused, unused = _unify_terms(
401
+ first[0], second[0], bindings, used
402
+ )
403
+ # Unification found, so progress with this line of unification
404
+ # put skipped and unused terms back into play for later unification.
405
+ newfirst = first[1:] + skipped[0] + unused[0]
406
+ newsecond = second[1:] + skipped[1] + unused[1]
407
+ result += _iterate_second(
408
+ newfirst,
409
+ newsecond,
410
+ newbindings,
411
+ newused,
412
+ ([], []),
413
+ finalize_method,
414
+ debug + 1,
415
+ )
416
+ except BindingException:
417
+ # the atoms could not be unified,
418
+ pass
419
+
420
+ return result
421
+
422
+
423
+ def _unify_terms(a, b, bindings=None, used=None):
424
+ """
425
+ This method attempts to unify two terms. Two expressions are unifiable
426
+ if there exists a substitution function S such that S(a) == S(-b).
427
+
428
+ :param a: ``Expression``
429
+ :param b: ``Expression``
430
+ :param bindings: ``BindingDict`` a starting set of bindings with which
431
+ the unification must be consistent
432
+ :return: ``BindingDict`` A dictionary of the bindings required to unify
433
+ :raise ``BindingException``: If the terms cannot be unified
434
+ """
435
+ assert isinstance(a, Expression)
436
+ assert isinstance(b, Expression)
437
+
438
+ if bindings is None:
439
+ bindings = BindingDict()
440
+ if used is None:
441
+ used = ([], [])
442
+
443
+ # Use resolution
444
+ if isinstance(a, NegatedExpression) and isinstance(b, ApplicationExpression):
445
+ newbindings = most_general_unification(a.term, b, bindings)
446
+ newused = (used[0] + [a], used[1] + [b])
447
+ unused = ([], [])
448
+ elif isinstance(a, ApplicationExpression) and isinstance(b, NegatedExpression):
449
+ newbindings = most_general_unification(a, b.term, bindings)
450
+ newused = (used[0] + [a], used[1] + [b])
451
+ unused = ([], [])
452
+
453
+ # Use demodulation
454
+ elif isinstance(a, EqualityExpression):
455
+ newbindings = BindingDict([(a.first.variable, a.second)])
456
+ newused = (used[0] + [a], used[1])
457
+ unused = ([], [b])
458
+ elif isinstance(b, EqualityExpression):
459
+ newbindings = BindingDict([(b.first.variable, b.second)])
460
+ newused = (used[0], used[1] + [b])
461
+ unused = ([a], [])
462
+
463
+ else:
464
+ raise BindingException((a, b))
465
+
466
+ return newbindings, newused, unused
467
+
468
+
469
+ def _complete_unify_path(first, second, bindings, used, skipped, debug):
470
+ if used[0] or used[1]: # if bindings were made along the path
471
+ newclause = Clause(skipped[0] + skipped[1] + first + second)
472
+ debug.line(" -> New Clause: %s" % newclause)
473
+ return [newclause.substitute_bindings(bindings)]
474
+ else: # no bindings made means no unification occurred. so no result
475
+ debug.line(" -> End")
476
+ return []
477
+
478
+
479
+ def _subsumes_finalize(first, second, bindings, used, skipped, debug):
480
+ if not len(skipped[0]) and not len(first):
481
+ # If there are no skipped terms and no terms left in 'first', then
482
+ # all of the terms in the original 'self' were unified with terms
483
+ # in 'other'. Therefore, there exists a binding (this one) such that
484
+ # every term in self can be unified with a term in other, which
485
+ # is the definition of subsumption.
486
+ return [True]
487
+ else:
488
+ return []
489
+
490
+
491
+ def clausify(expression):
492
+ """
493
+ Skolemize, clausify, and standardize the variables apart.
494
+ """
495
+ clause_list = []
496
+ for clause in _clausify(skolemize(expression)):
497
+ for free in clause.free():
498
+ if is_indvar(free.name):
499
+ newvar = VariableExpression(unique_variable())
500
+ clause = clause.replace(free, newvar)
501
+ clause_list.append(clause)
502
+ return clause_list
503
+
504
+
505
+ def _clausify(expression):
506
+ """
507
+ :param expression: a skolemized expression in CNF
508
+ """
509
+ if isinstance(expression, AndExpression):
510
+ return _clausify(expression.first) + _clausify(expression.second)
511
+ elif isinstance(expression, OrExpression):
512
+ first = _clausify(expression.first)
513
+ second = _clausify(expression.second)
514
+ assert len(first) == 1
515
+ assert len(second) == 1
516
+ return [first[0] + second[0]]
517
+ elif isinstance(expression, EqualityExpression):
518
+ return [Clause([expression])]
519
+ elif isinstance(expression, ApplicationExpression):
520
+ return [Clause([expression])]
521
+ elif isinstance(expression, NegatedExpression):
522
+ if isinstance(expression.term, ApplicationExpression):
523
+ return [Clause([expression])]
524
+ elif isinstance(expression.term, EqualityExpression):
525
+ return [Clause([expression])]
526
+ raise ProverParseError()
527
+
528
+
529
+ class BindingDict:
530
+ def __init__(self, binding_list=None):
531
+ """
532
+ :param binding_list: list of (``AbstractVariableExpression``, ``AtomicExpression``) to initialize the dictionary
533
+ """
534
+ self.d = {}
535
+
536
+ if binding_list:
537
+ for (v, b) in binding_list:
538
+ self[v] = b
539
+
540
+ def __setitem__(self, variable, binding):
541
+ """
542
+ A binding is consistent with the dict if its variable is not already bound, OR if its
543
+ variable is already bound to its argument.
544
+
545
+ :param variable: ``Variable`` The variable to bind
546
+ :param binding: ``Expression`` The atomic to which 'variable' should be bound
547
+ :raise BindingException: If the variable cannot be bound in this dictionary
548
+ """
549
+ assert isinstance(variable, Variable)
550
+ assert isinstance(binding, Expression)
551
+
552
+ try:
553
+ existing = self[variable]
554
+ except KeyError:
555
+ existing = None
556
+
557
+ if not existing or binding == existing:
558
+ self.d[variable] = binding
559
+ elif isinstance(binding, IndividualVariableExpression):
560
+ # Since variable is already bound, try to bind binding to variable
561
+ try:
562
+ existing = self[binding.variable]
563
+ except KeyError:
564
+ existing = None
565
+
566
+ binding2 = VariableExpression(variable)
567
+
568
+ if not existing or binding2 == existing:
569
+ self.d[binding.variable] = binding2
570
+ else:
571
+ raise BindingException(
572
+ "Variable %s already bound to another " "value" % (variable)
573
+ )
574
+ else:
575
+ raise BindingException(
576
+ "Variable %s already bound to another " "value" % (variable)
577
+ )
578
+
579
+ def __getitem__(self, variable):
580
+ """
581
+ Return the expression to which 'variable' is bound
582
+ """
583
+ assert isinstance(variable, Variable)
584
+
585
+ intermediate = self.d[variable]
586
+ while intermediate:
587
+ try:
588
+ intermediate = self.d[intermediate]
589
+ except KeyError:
590
+ return intermediate
591
+
592
+ def __contains__(self, item):
593
+ return item in self.d
594
+
595
+ def __add__(self, other):
596
+ """
597
+ :param other: ``BindingDict`` The dict with which to combine self
598
+ :return: ``BindingDict`` A new dict containing all the elements of both parameters
599
+ :raise BindingException: If the parameter dictionaries are not consistent with each other
600
+ """
601
+ try:
602
+ combined = BindingDict()
603
+ for v in self.d:
604
+ combined[v] = self.d[v]
605
+ for v in other.d:
606
+ combined[v] = other.d[v]
607
+ return combined
608
+ except BindingException as e:
609
+ raise BindingException(
610
+ "Attempting to add two contradicting "
611
+ "BindingDicts: '%s' and '%s'" % (self, other)
612
+ ) from e
613
+
614
+ def __len__(self):
615
+ return len(self.d)
616
+
617
+ def __str__(self):
618
+ data_str = ", ".join(f"{v}: {self.d[v]}" for v in sorted(self.d.keys()))
619
+ return "{" + data_str + "}"
620
+
621
+ def __repr__(self):
622
+ return "%s" % self
623
+
624
+
625
+ def most_general_unification(a, b, bindings=None):
626
+ """
627
+ Find the most general unification of the two given expressions
628
+
629
+ :param a: ``Expression``
630
+ :param b: ``Expression``
631
+ :param bindings: ``BindingDict`` a starting set of bindings with which the
632
+ unification must be consistent
633
+ :return: ``BindingDict`` A dictionary of the bindings required to unify
634
+ :raise BindingException: if the Expressions cannot be unified
635
+ """
636
+ if bindings is None:
637
+ bindings = BindingDict()
638
+
639
+ if a == b:
640
+ return bindings
641
+ elif isinstance(a, IndividualVariableExpression):
642
+ return _mgu_var(a, b, bindings)
643
+ elif isinstance(b, IndividualVariableExpression):
644
+ return _mgu_var(b, a, bindings)
645
+ elif isinstance(a, ApplicationExpression) and isinstance(b, ApplicationExpression):
646
+ return most_general_unification(
647
+ a.function, b.function, bindings
648
+ ) + most_general_unification(a.argument, b.argument, bindings)
649
+ raise BindingException((a, b))
650
+
651
+
652
+ def _mgu_var(var, expression, bindings):
653
+ if var.variable in expression.free() | expression.constants():
654
+ raise BindingException((var, expression))
655
+ else:
656
+ return BindingDict([(var.variable, expression)]) + bindings
657
+
658
+
659
+ class BindingException(Exception):
660
+ def __init__(self, arg):
661
+ if isinstance(arg, tuple):
662
+ Exception.__init__(self, "'%s' cannot be bound to '%s'" % arg)
663
+ else:
664
+ Exception.__init__(self, arg)
665
+
666
+
667
+ class UnificationException(Exception):
668
+ def __init__(self, a, b):
669
+ Exception.__init__(self, f"'{a}' cannot unify with '{b}'")
670
+
671
+
672
+ class DebugObject:
673
+ def __init__(self, enabled=True, indent=0):
674
+ self.enabled = enabled
675
+ self.indent = indent
676
+
677
+ def __add__(self, i):
678
+ return DebugObject(self.enabled, self.indent + i)
679
+
680
+ def line(self, line):
681
+ if self.enabled:
682
+ print(" " * self.indent + line)
683
+
684
+
685
+ def testResolutionProver():
686
+ resolution_test(r"man(x)")
687
+ resolution_test(r"(man(x) -> man(x))")
688
+ resolution_test(r"(man(x) -> --man(x))")
689
+ resolution_test(r"-(man(x) and -man(x))")
690
+ resolution_test(r"(man(x) or -man(x))")
691
+ resolution_test(r"(man(x) -> man(x))")
692
+ resolution_test(r"-(man(x) and -man(x))")
693
+ resolution_test(r"(man(x) or -man(x))")
694
+ resolution_test(r"(man(x) -> man(x))")
695
+ resolution_test(r"(man(x) iff man(x))")
696
+ resolution_test(r"-(man(x) iff -man(x))")
697
+ resolution_test("all x.man(x)")
698
+ resolution_test("-all x.some y.F(x,y) & some x.all y.(-F(x,y))")
699
+ resolution_test("some x.all y.sees(x,y)")
700
+
701
+ p1 = Expression.fromstring(r"all x.(man(x) -> mortal(x))")
702
+ p2 = Expression.fromstring(r"man(Socrates)")
703
+ c = Expression.fromstring(r"mortal(Socrates)")
704
+ print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}")
705
+
706
+ p1 = Expression.fromstring(r"all x.(man(x) -> walks(x))")
707
+ p2 = Expression.fromstring(r"man(John)")
708
+ c = Expression.fromstring(r"some y.walks(y)")
709
+ print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}")
710
+
711
+ p = Expression.fromstring(r"some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))")
712
+ c = Expression.fromstring(r"some e0.walk(e0,mary)")
713
+ print(f"{p} |- {c}: {ResolutionProver().prove(c, [p])}")
714
+
715
+
716
+ def resolution_test(e):
717
+ f = Expression.fromstring(e)
718
+ t = ResolutionProver().prove(f)
719
+ print(f"|- {f}: {t}")
720
+
721
+
722
+ def test_clausify():
723
+ lexpr = Expression.fromstring
724
+
725
+ print(clausify(lexpr("P(x) | Q(x)")))
726
+ print(clausify(lexpr("(P(x) & Q(x)) | R(x)")))
727
+ print(clausify(lexpr("P(x) | (Q(x) & R(x))")))
728
+ print(clausify(lexpr("(P(x) & Q(x)) | (R(x) & S(x))")))
729
+
730
+ print(clausify(lexpr("P(x) | Q(x) | R(x)")))
731
+ print(clausify(lexpr("P(x) | (Q(x) & R(x)) | S(x)")))
732
+
733
+ print(clausify(lexpr("exists x.P(x) | Q(x)")))
734
+
735
+ print(clausify(lexpr("-(-P(x) & Q(x))")))
736
+ print(clausify(lexpr("P(x) <-> Q(x)")))
737
+ print(clausify(lexpr("-(P(x) <-> Q(x))")))
738
+ print(clausify(lexpr("-(all x.P(x))")))
739
+ print(clausify(lexpr("-(some x.P(x))")))
740
+
741
+ print(clausify(lexpr("some x.P(x)")))
742
+ print(clausify(lexpr("some x.all y.P(x,y)")))
743
+ print(clausify(lexpr("all y.some x.P(x,y)")))
744
+ print(clausify(lexpr("all z.all y.some x.P(x,y,z)")))
745
+ print(clausify(lexpr("all x.(all y.P(x,y) -> -all y.(Q(x,y) -> R(x,y)))")))
746
+
747
+
748
+ def demo():
749
+ test_clausify()
750
+ print()
751
+ testResolutionProver()
752
+ print()
753
+
754
+ p = Expression.fromstring("man(x)")
755
+ print(ResolutionProverCommand(p, [p]).prove())
756
+
757
+
758
+ if __name__ == "__main__":
759
+ demo()
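For orientation, a minimal usage sketch of the resolution module added above, mirroring its own demo() and testResolutionProver() helpers; the premises and goal are the Socrates example used there, and the import paths assume the package layout shown in this commit.

from nltk.sem.logic import Expression
from nltk.inference.resolution import ResolutionProver, clausify

# Premises and goal, parsed from first-order logic strings.
p1 = Expression.fromstring(r"all x.(man(x) -> mortal(x))")
p2 = Expression.fromstring(r"man(Socrates)")
goal = Expression.fromstring(r"mortal(Socrates)")

# clausify() Skolemizes, converts to clause form, and standardizes variables apart.
print(clausify(p1))

# prove() returns True when the goal follows from the assumptions by resolution.
print(ResolutionProver().prove(goal, [p1, p2]))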
llmeval-env/lib/python3.10/site-packages/nltk/inference/tableau.py ADDED
@@ -0,0 +1,712 @@
1
+ # Natural Language Toolkit: First-Order Tableau Theorem Prover
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Dan Garrette <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Module for a tableau-based First Order theorem prover.
11
+ """
12
+
13
+ from nltk.inference.api import BaseProverCommand, Prover
14
+ from nltk.internals import Counter
15
+ from nltk.sem.logic import (
16
+ AbstractVariableExpression,
17
+ AllExpression,
18
+ AndExpression,
19
+ ApplicationExpression,
20
+ EqualityExpression,
21
+ ExistsExpression,
22
+ Expression,
23
+ FunctionVariableExpression,
24
+ IffExpression,
25
+ ImpExpression,
26
+ LambdaExpression,
27
+ NegatedExpression,
28
+ OrExpression,
29
+ Variable,
30
+ VariableExpression,
31
+ unique_variable,
32
+ )
33
+
34
+ _counter = Counter()
35
+
36
+
37
+ class ProverParseError(Exception):
38
+ pass
39
+
40
+
41
+ class TableauProver(Prover):
42
+ _assume_false = False
43
+
44
+ def _prove(self, goal=None, assumptions=None, verbose=False):
45
+ if not assumptions:
46
+ assumptions = []
47
+
48
+ result = None
49
+ try:
50
+ agenda = Agenda()
51
+ if goal:
52
+ agenda.put(-goal)
53
+ agenda.put_all(assumptions)
54
+ debugger = Debug(verbose)
55
+ result = self._attempt_proof(agenda, set(), set(), debugger)
56
+ except RuntimeError as e:
57
+ if self._assume_false and str(e).startswith(
58
+ "maximum recursion depth exceeded"
59
+ ):
60
+ result = False
61
+ else:
62
+ if verbose:
63
+ print(e)
64
+ else:
65
+ raise e
66
+ return (result, "\n".join(debugger.lines))
67
+
68
+ def _attempt_proof(self, agenda, accessible_vars, atoms, debug):
69
+ (current, context), category = agenda.pop_first()
70
+
71
+ # if there's nothing left in the agenda, and we haven't closed the path
72
+ if not current:
73
+ debug.line("AGENDA EMPTY")
74
+ return False
75
+
76
+ proof_method = {
77
+ Categories.ATOM: self._attempt_proof_atom,
78
+ Categories.PROP: self._attempt_proof_prop,
79
+ Categories.N_ATOM: self._attempt_proof_n_atom,
80
+ Categories.N_PROP: self._attempt_proof_n_prop,
81
+ Categories.APP: self._attempt_proof_app,
82
+ Categories.N_APP: self._attempt_proof_n_app,
83
+ Categories.N_EQ: self._attempt_proof_n_eq,
84
+ Categories.D_NEG: self._attempt_proof_d_neg,
85
+ Categories.N_ALL: self._attempt_proof_n_all,
86
+ Categories.N_EXISTS: self._attempt_proof_n_some,
87
+ Categories.AND: self._attempt_proof_and,
88
+ Categories.N_OR: self._attempt_proof_n_or,
89
+ Categories.N_IMP: self._attempt_proof_n_imp,
90
+ Categories.OR: self._attempt_proof_or,
91
+ Categories.IMP: self._attempt_proof_imp,
92
+ Categories.N_AND: self._attempt_proof_n_and,
93
+ Categories.IFF: self._attempt_proof_iff,
94
+ Categories.N_IFF: self._attempt_proof_n_iff,
95
+ Categories.EQ: self._attempt_proof_eq,
96
+ Categories.EXISTS: self._attempt_proof_some,
97
+ Categories.ALL: self._attempt_proof_all,
98
+ }[category]
99
+
100
+ debug.line((current, context))
101
+ return proof_method(current, context, agenda, accessible_vars, atoms, debug)
102
+
103
+ def _attempt_proof_atom(
104
+ self, current, context, agenda, accessible_vars, atoms, debug
105
+ ):
106
+ # Check if the branch is closed. Return 'True' if it is
107
+ if (current, True) in atoms:
108
+ debug.line("CLOSED", 1)
109
+ return True
110
+
111
+ if context:
112
+ if isinstance(context.term, NegatedExpression):
113
+ current = current.negate()
114
+ agenda.put(context(current).simplify())
115
+ return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
116
+ else:
117
+ # mark all AllExpressions in the agenda as 'not exhausted' since we are (potentially) adding new accessible vars
118
+ agenda.mark_alls_fresh()
119
+ return self._attempt_proof(
120
+ agenda,
121
+ accessible_vars | set(current.args),
122
+ atoms | {(current, False)},
123
+ debug + 1,
124
+ )
125
+
126
+ def _attempt_proof_n_atom(
127
+ self, current, context, agenda, accessible_vars, atoms, debug
128
+ ):
129
+ # Check if the branch is closed. Return 'True' if it is
130
+ if (current.term, False) in atoms:
131
+ debug.line("CLOSED", 1)
132
+ return True
133
+
134
+ if context:
135
+ if isinstance(context.term, NegatedExpression):
136
+ current = current.negate()
137
+ agenda.put(context(current).simplify())
138
+ return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
139
+ else:
140
+ # mark all AllExpressions in the agenda as 'not exhausted' since we are (potentially) adding new accessible vars
141
+ agenda.mark_alls_fresh()
142
+ return self._attempt_proof(
143
+ agenda,
144
+ accessible_vars | set(current.term.args),
145
+ atoms | {(current.term, True)},
146
+ debug + 1,
147
+ )
148
+
149
+ def _attempt_proof_prop(
150
+ self, current, context, agenda, accessible_vars, atoms, debug
151
+ ):
152
+ # Check if the branch is closed. Return 'True' if it is
153
+ if (current, True) in atoms:
154
+ debug.line("CLOSED", 1)
155
+ return True
156
+
157
+ # mark all AllExpressions in the agenda as 'not exhausted' since we are (potentially) adding new accessible vars
158
+ agenda.mark_alls_fresh()
159
+ return self._attempt_proof(
160
+ agenda, accessible_vars, atoms | {(current, False)}, debug + 1
161
+ )
162
+
163
+ def _attempt_proof_n_prop(
164
+ self, current, context, agenda, accessible_vars, atoms, debug
165
+ ):
166
+ # Check if the branch is closed. Return 'True' if it is
167
+ if (current.term, False) in atoms:
168
+ debug.line("CLOSED", 1)
169
+ return True
170
+
171
+ # mark all AllExpressions in the agenda as 'not exhausted' since we are (potentially) adding new accessible vars
172
+ agenda.mark_alls_fresh()
173
+ return self._attempt_proof(
174
+ agenda, accessible_vars, atoms | {(current.term, True)}, debug + 1
175
+ )
176
+
177
+ def _attempt_proof_app(
178
+ self, current, context, agenda, accessible_vars, atoms, debug
179
+ ):
180
+ f, args = current.uncurry()
181
+ for i, arg in enumerate(args):
182
+ if not TableauProver.is_atom(arg):
183
+ ctx = f
184
+ nv = Variable("X%s" % _counter.get())
185
+ for j, a in enumerate(args):
186
+ ctx = ctx(VariableExpression(nv)) if i == j else ctx(a)
187
+ if context:
188
+ ctx = context(ctx).simplify()
189
+ ctx = LambdaExpression(nv, ctx)
190
+ agenda.put(arg, ctx)
191
+ return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
192
+ raise Exception("If this method is called, there must be a non-atomic argument")
193
+
194
+ def _attempt_proof_n_app(
195
+ self, current, context, agenda, accessible_vars, atoms, debug
196
+ ):
197
+ f, args = current.term.uncurry()
198
+ for i, arg in enumerate(args):
199
+ if not TableauProver.is_atom(arg):
200
+ ctx = f
201
+ nv = Variable("X%s" % _counter.get())
202
+ for j, a in enumerate(args):
203
+ ctx = ctx(VariableExpression(nv)) if i == j else ctx(a)
204
+ if context:
205
+ # combine new context with existing
206
+ ctx = context(ctx).simplify()
207
+ ctx = LambdaExpression(nv, -ctx)
208
+ agenda.put(-arg, ctx)
209
+ return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
210
+ raise Exception("If this method is called, there must be a non-atomic argument")
211
+
212
+ def _attempt_proof_n_eq(
213
+ self, current, context, agenda, accessible_vars, atoms, debug
214
+ ):
215
+ ###########################################################################
216
+ # Since 'current' is of type '~(a=b)', the path is closed if 'a' == 'b'
217
+ ###########################################################################
218
+ if current.term.first == current.term.second:
219
+ debug.line("CLOSED", 1)
220
+ return True
221
+
222
+ agenda[Categories.N_EQ].add((current, context))
223
+ current._exhausted = True
224
+ return self._attempt_proof(
225
+ agenda,
226
+ accessible_vars | {current.term.first, current.term.second},
227
+ atoms,
228
+ debug + 1,
229
+ )
230
+
231
+ def _attempt_proof_d_neg(
232
+ self, current, context, agenda, accessible_vars, atoms, debug
233
+ ):
234
+ agenda.put(current.term.term, context)
235
+ return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
236
+
237
+ def _attempt_proof_n_all(
238
+ self, current, context, agenda, accessible_vars, atoms, debug
239
+ ):
240
+ agenda[Categories.EXISTS].add(
241
+ (ExistsExpression(current.term.variable, -current.term.term), context)
242
+ )
243
+ return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
244
+
245
+ def _attempt_proof_n_some(
246
+ self, current, context, agenda, accessible_vars, atoms, debug
247
+ ):
248
+ agenda[Categories.ALL].add(
249
+ (AllExpression(current.term.variable, -current.term.term), context)
250
+ )
251
+ return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
252
+
253
+ def _attempt_proof_and(
254
+ self, current, context, agenda, accessible_vars, atoms, debug
255
+ ):
256
+ agenda.put(current.first, context)
257
+ agenda.put(current.second, context)
258
+ return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
259
+
260
+ def _attempt_proof_n_or(
261
+ self, current, context, agenda, accessible_vars, atoms, debug
262
+ ):
263
+ agenda.put(-current.term.first, context)
264
+ agenda.put(-current.term.second, context)
265
+ return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
266
+
267
+ def _attempt_proof_n_imp(
268
+ self, current, context, agenda, accessible_vars, atoms, debug
269
+ ):
270
+ agenda.put(current.term.first, context)
271
+ agenda.put(-current.term.second, context)
272
+ return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
273
+
274
+ def _attempt_proof_or(
275
+ self, current, context, agenda, accessible_vars, atoms, debug
276
+ ):
277
+ new_agenda = agenda.clone()
278
+ agenda.put(current.first, context)
279
+ new_agenda.put(current.second, context)
280
+ return self._attempt_proof(
281
+ agenda, accessible_vars, atoms, debug + 1
282
+ ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1)
283
+
284
+ def _attempt_proof_imp(
285
+ self, current, context, agenda, accessible_vars, atoms, debug
286
+ ):
287
+ new_agenda = agenda.clone()
288
+ agenda.put(-current.first, context)
289
+ new_agenda.put(current.second, context)
290
+ return self._attempt_proof(
291
+ agenda, accessible_vars, atoms, debug + 1
292
+ ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1)
293
+
294
+ def _attempt_proof_n_and(
295
+ self, current, context, agenda, accessible_vars, atoms, debug
296
+ ):
297
+ new_agenda = agenda.clone()
298
+ agenda.put(-current.term.first, context)
299
+ new_agenda.put(-current.term.second, context)
300
+ return self._attempt_proof(
301
+ agenda, accessible_vars, atoms, debug + 1
302
+ ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1)
303
+
304
+ def _attempt_proof_iff(
305
+ self, current, context, agenda, accessible_vars, atoms, debug
306
+ ):
307
+ new_agenda = agenda.clone()
308
+ agenda.put(current.first, context)
309
+ agenda.put(current.second, context)
310
+ new_agenda.put(-current.first, context)
311
+ new_agenda.put(-current.second, context)
312
+ return self._attempt_proof(
313
+ agenda, accessible_vars, atoms, debug + 1
314
+ ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1)
315
+
316
+ def _attempt_proof_n_iff(
317
+ self, current, context, agenda, accessible_vars, atoms, debug
318
+ ):
319
+ new_agenda = agenda.clone()
320
+ agenda.put(current.term.first, context)
321
+ agenda.put(-current.term.second, context)
322
+ new_agenda.put(-current.term.first, context)
323
+ new_agenda.put(current.term.second, context)
324
+ return self._attempt_proof(
325
+ agenda, accessible_vars, atoms, debug + 1
326
+ ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1)
327
+
328
+ def _attempt_proof_eq(
329
+ self, current, context, agenda, accessible_vars, atoms, debug
330
+ ):
331
+ #########################################################################
332
+ # Since 'current' is of the form '(a = b)', replace ALL free instances
333
+ # of 'a' with 'b'
334
+ #########################################################################
335
+ agenda.put_atoms(atoms)
336
+ agenda.replace_all(current.first, current.second)
337
+ accessible_vars.discard(current.first)
338
+ agenda.mark_neqs_fresh()
339
+ return self._attempt_proof(agenda, accessible_vars, set(), debug + 1)
340
+
341
+ def _attempt_proof_some(
342
+ self, current, context, agenda, accessible_vars, atoms, debug
343
+ ):
344
+ new_unique_variable = VariableExpression(unique_variable())
345
+ agenda.put(current.term.replace(current.variable, new_unique_variable), context)
346
+ agenda.mark_alls_fresh()
347
+ return self._attempt_proof(
348
+ agenda, accessible_vars | {new_unique_variable}, atoms, debug + 1
349
+ )
350
+
351
+ def _attempt_proof_all(
352
+ self, current, context, agenda, accessible_vars, atoms, debug
353
+ ):
354
+ try:
355
+ current._used_vars
356
+ except AttributeError:
357
+ current._used_vars = set()
358
+
359
+ # if there are accessible_vars on the path
360
+ if accessible_vars:
361
+ # get the set of bound variables that have not been used by this AllExpression
362
+ bv_available = accessible_vars - current._used_vars
363
+
364
+ if bv_available:
365
+ variable_to_use = list(bv_available)[0]
366
+ debug.line("--> Using '%s'" % variable_to_use, 2)
367
+ current._used_vars |= {variable_to_use}
368
+ agenda.put(
369
+ current.term.replace(current.variable, variable_to_use), context
370
+ )
371
+ agenda[Categories.ALL].add((current, context))
372
+ return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
373
+
374
+ else:
375
+ # no more available variables to substitute
376
+ debug.line("--> Variables Exhausted", 2)
377
+ current._exhausted = True
378
+ agenda[Categories.ALL].add((current, context))
379
+ return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
380
+
381
+ else:
382
+ new_unique_variable = VariableExpression(unique_variable())
383
+ debug.line("--> Using '%s'" % new_unique_variable, 2)
384
+ current._used_vars |= {new_unique_variable}
385
+ agenda.put(
386
+ current.term.replace(current.variable, new_unique_variable), context
387
+ )
388
+ agenda[Categories.ALL].add((current, context))
389
+ agenda.mark_alls_fresh()
390
+ return self._attempt_proof(
391
+ agenda, accessible_vars | {new_unique_variable}, atoms, debug + 1
392
+ )
393
+
394
+ @staticmethod
395
+ def is_atom(e):
396
+ if isinstance(e, NegatedExpression):
397
+ e = e.term
398
+
399
+ if isinstance(e, ApplicationExpression):
400
+ for arg in e.args:
401
+ if not TableauProver.is_atom(arg):
402
+ return False
403
+ return True
404
+ elif isinstance(e, AbstractVariableExpression) or isinstance(
405
+ e, LambdaExpression
406
+ ):
407
+ return True
408
+ else:
409
+ return False
410
+
411
+
412
+ class TableauProverCommand(BaseProverCommand):
413
+ def __init__(self, goal=None, assumptions=None, prover=None):
414
+ """
415
+ :param goal: Input expression to prove
416
+ :type goal: sem.Expression
417
+ :param assumptions: Input expressions to use as assumptions in
418
+ the proof.
419
+ :type assumptions: list(sem.Expression)
420
+ """
421
+ if prover is not None:
422
+ assert isinstance(prover, TableauProver)
423
+ else:
424
+ prover = TableauProver()
425
+
426
+ BaseProverCommand.__init__(self, prover, goal, assumptions)
427
+
428
+
429
+ class Agenda:
430
+ def __init__(self):
431
+ self.sets = tuple(set() for i in range(21))
432
+
433
+ def clone(self):
434
+ new_agenda = Agenda()
435
+ set_list = [s.copy() for s in self.sets]
436
+
437
+ new_allExs = set()
438
+ for allEx, _ in set_list[Categories.ALL]:
439
+ new_allEx = AllExpression(allEx.variable, allEx.term)
440
+ try:
441
+ new_allEx._used_vars = {used for used in allEx._used_vars}
442
+ except AttributeError:
443
+ new_allEx._used_vars = set()
444
+ new_allExs.add((new_allEx, None))
445
+ set_list[Categories.ALL] = new_allExs
446
+
447
+ set_list[Categories.N_EQ] = {
448
+ (NegatedExpression(n_eq.term), ctx)
449
+ for (n_eq, ctx) in set_list[Categories.N_EQ]
450
+ }
451
+
452
+ new_agenda.sets = tuple(set_list)
453
+ return new_agenda
454
+
455
+ def __getitem__(self, index):
456
+ return self.sets[index]
457
+
458
+ def put(self, expression, context=None):
459
+ if isinstance(expression, AllExpression):
460
+ ex_to_add = AllExpression(expression.variable, expression.term)
461
+ try:
462
+ ex_to_add._used_vars = {used for used in expression._used_vars}
463
+ except AttributeError:
464
+ ex_to_add._used_vars = set()
465
+ else:
466
+ ex_to_add = expression
467
+ self.sets[self._categorize_expression(ex_to_add)].add((ex_to_add, context))
468
+
469
+ def put_all(self, expressions):
470
+ for expression in expressions:
471
+ self.put(expression)
472
+
473
+ def put_atoms(self, atoms):
474
+ for atom, neg in atoms:
475
+ if neg:
476
+ self[Categories.N_ATOM].add((-atom, None))
477
+ else:
478
+ self[Categories.ATOM].add((atom, None))
479
+
480
+ def pop_first(self):
481
+ """Pop the first expression that appears in the agenda"""
482
+ for i, s in enumerate(self.sets):
483
+ if s:
484
+ if i in [Categories.N_EQ, Categories.ALL]:
485
+ for ex in s:
486
+ try:
487
+ if not ex[0]._exhausted:
488
+ s.remove(ex)
489
+ return (ex, i)
490
+ except AttributeError:
491
+ s.remove(ex)
492
+ return (ex, i)
493
+ else:
494
+ return (s.pop(), i)
495
+ return ((None, None), None)
496
+
497
+ def replace_all(self, old, new):
498
+ for s in self.sets:
499
+ for ex, ctx in s:
500
+ ex.replace(old.variable, new)
501
+ if ctx is not None:
502
+ ctx.replace(old.variable, new)
503
+
504
+ def mark_alls_fresh(self):
505
+ for u, _ in self.sets[Categories.ALL]:
506
+ u._exhausted = False
507
+
508
+ def mark_neqs_fresh(self):
509
+ for neq, _ in self.sets[Categories.N_EQ]:
510
+ neq._exhausted = False
511
+
512
+ def _categorize_expression(self, current):
513
+ if isinstance(current, NegatedExpression):
514
+ return self._categorize_NegatedExpression(current)
515
+ elif isinstance(current, FunctionVariableExpression):
516
+ return Categories.PROP
517
+ elif TableauProver.is_atom(current):
518
+ return Categories.ATOM
519
+ elif isinstance(current, AllExpression):
520
+ return Categories.ALL
521
+ elif isinstance(current, AndExpression):
522
+ return Categories.AND
523
+ elif isinstance(current, OrExpression):
524
+ return Categories.OR
525
+ elif isinstance(current, ImpExpression):
526
+ return Categories.IMP
527
+ elif isinstance(current, IffExpression):
528
+ return Categories.IFF
529
+ elif isinstance(current, EqualityExpression):
530
+ return Categories.EQ
531
+ elif isinstance(current, ExistsExpression):
532
+ return Categories.EXISTS
533
+ elif isinstance(current, ApplicationExpression):
534
+ return Categories.APP
535
+ else:
536
+ raise ProverParseError("cannot categorize %s" % current.__class__.__name__)
537
+
538
+ def _categorize_NegatedExpression(self, current):
539
+ negated = current.term
540
+
541
+ if isinstance(negated, NegatedExpression):
542
+ return Categories.D_NEG
543
+ elif isinstance(negated, FunctionVariableExpression):
544
+ return Categories.N_PROP
545
+ elif TableauProver.is_atom(negated):
546
+ return Categories.N_ATOM
547
+ elif isinstance(negated, AllExpression):
548
+ return Categories.N_ALL
549
+ elif isinstance(negated, AndExpression):
550
+ return Categories.N_AND
551
+ elif isinstance(negated, OrExpression):
552
+ return Categories.N_OR
553
+ elif isinstance(negated, ImpExpression):
554
+ return Categories.N_IMP
555
+ elif isinstance(negated, IffExpression):
556
+ return Categories.N_IFF
557
+ elif isinstance(negated, EqualityExpression):
558
+ return Categories.N_EQ
559
+ elif isinstance(negated, ExistsExpression):
560
+ return Categories.N_EXISTS
561
+ elif isinstance(negated, ApplicationExpression):
562
+ return Categories.N_APP
563
+ else:
564
+ raise ProverParseError("cannot categorize %s" % negated.__class__.__name__)
565
+
566
+
567
+ class Debug:
568
+ def __init__(self, verbose, indent=0, lines=None):
569
+ self.verbose = verbose
570
+ self.indent = indent
571
+
572
+ if not lines:
573
+ lines = []
574
+ self.lines = lines
575
+
576
+ def __add__(self, increment):
577
+ return Debug(self.verbose, self.indent + 1, self.lines)
578
+
579
+ def line(self, data, indent=0):
580
+ if isinstance(data, tuple):
581
+ ex, ctx = data
582
+ if ctx:
583
+ data = f"{ex}, {ctx}"
584
+ else:
585
+ data = "%s" % ex
586
+
587
+ if isinstance(ex, AllExpression):
588
+ try:
589
+ used_vars = "[%s]" % (
590
+ ",".join("%s" % ve.variable.name for ve in ex._used_vars)
591
+ )
592
+ data += ": %s" % used_vars
593
+ except AttributeError:
594
+ data += ": []"
595
+
596
+ newline = "{}{}".format(" " * (self.indent + indent), data)
597
+ self.lines.append(newline)
598
+
599
+ if self.verbose:
600
+ print(newline)
601
+
602
+
603
+ class Categories:
604
+ ATOM = 0
605
+ PROP = 1
606
+ N_ATOM = 2
607
+ N_PROP = 3
608
+ APP = 4
609
+ N_APP = 5
610
+ N_EQ = 6
611
+ D_NEG = 7
612
+ N_ALL = 8
613
+ N_EXISTS = 9
614
+ AND = 10
615
+ N_OR = 11
616
+ N_IMP = 12
617
+ OR = 13
618
+ IMP = 14
619
+ N_AND = 15
620
+ IFF = 16
621
+ N_IFF = 17
622
+ EQ = 18
623
+ EXISTS = 19
624
+ ALL = 20
625
+
626
+
627
+ def testTableauProver():
628
+ tableau_test("P | -P")
629
+ tableau_test("P & -P")
630
+ tableau_test("Q", ["P", "(P -> Q)"])
631
+ tableau_test("man(x)")
632
+ tableau_test("(man(x) -> man(x))")
633
+ tableau_test("(man(x) -> --man(x))")
634
+ tableau_test("-(man(x) and -man(x))")
635
+ tableau_test("(man(x) or -man(x))")
636
+ tableau_test("(man(x) -> man(x))")
637
+ tableau_test("-(man(x) and -man(x))")
638
+ tableau_test("(man(x) or -man(x))")
639
+ tableau_test("(man(x) -> man(x))")
640
+ tableau_test("(man(x) iff man(x))")
641
+ tableau_test("-(man(x) iff -man(x))")
642
+ tableau_test("all x.man(x)")
643
+ tableau_test("all x.all y.((x = y) -> (y = x))")
644
+ tableau_test("all x.all y.all z.(((x = y) & (y = z)) -> (x = z))")
645
+ # tableau_test('-all x.some y.F(x,y) & some x.all y.(-F(x,y))')
646
+ # tableau_test('some x.all y.sees(x,y)')
647
+
648
+ p1 = "all x.(man(x) -> mortal(x))"
649
+ p2 = "man(Socrates)"
650
+ c = "mortal(Socrates)"
651
+ tableau_test(c, [p1, p2])
652
+
653
+ p1 = "all x.(man(x) -> walks(x))"
654
+ p2 = "man(John)"
655
+ c = "some y.walks(y)"
656
+ tableau_test(c, [p1, p2])
657
+
658
+ p = "((x = y) & walks(y))"
659
+ c = "walks(x)"
660
+ tableau_test(c, [p])
661
+
662
+ p = "((x = y) & ((y = z) & (z = w)))"
663
+ c = "(x = w)"
664
+ tableau_test(c, [p])
665
+
666
+ p = "some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))"
667
+ c = "some e0.walk(e0,mary)"
668
+ tableau_test(c, [p])
669
+
670
+ c = "(exists x.exists z3.((x = Mary) & ((z3 = John) & sees(z3,x))) <-> exists x.exists z4.((x = John) & ((z4 = Mary) & sees(x,z4))))"
671
+ tableau_test(c)
672
+
673
+
674
+ # p = 'some e1.some e2.((believe e1 john e2) and (walk e2 mary))'
675
+ # c = 'some x.some e3.some e4.((believe e3 x e4) and (walk e4 mary))'
676
+ # tableau_test(c, [p])
677
+
678
+
679
+ def testHigherOrderTableauProver():
680
+ tableau_test("believe(j, -lie(b))", ["believe(j, -lie(b) & -cheat(b))"])
681
+ tableau_test("believe(j, lie(b) & cheat(b))", ["believe(j, lie(b))"])
682
+ tableau_test(
683
+ "believe(j, lie(b))", ["lie(b)"]
684
+ ) # how do we capture that John believes all things that are true
685
+ tableau_test(
686
+ "believe(j, know(b, cheat(b)))",
687
+ ["believe(j, know(b, lie(b)) & know(b, steals(b) & cheat(b)))"],
688
+ )
689
+ tableau_test("P(Q(y), R(y) & R(z))", ["P(Q(x) & Q(y), R(y) & R(z))"])
690
+
691
+ tableau_test("believe(j, cheat(b) & lie(b))", ["believe(j, lie(b) & cheat(b))"])
692
+ tableau_test("believe(j, -cheat(b) & -lie(b))", ["believe(j, -lie(b) & -cheat(b))"])
693
+
694
+
695
+ def tableau_test(c, ps=None, verbose=False):
696
+ pc = Expression.fromstring(c)
697
+ pps = [Expression.fromstring(p) for p in ps] if ps else []
698
+ if not ps:
699
+ ps = []
700
+ print(
701
+ "%s |- %s: %s"
702
+ % (", ".join(ps), pc, TableauProver().prove(pc, pps, verbose=verbose))
703
+ )
704
+
705
+
706
+ def demo():
707
+ testTableauProver()
708
+ testHigherOrderTableauProver()
709
+
710
+
711
+ if __name__ == "__main__":
712
+ demo()
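As with the resolution module, a minimal sketch of driving the tableau prover defined above, following the tableau_test() helper in this file; the formulas are taken from testTableauProver().

from nltk.sem.logic import Expression
from nltk.inference.tableau import TableauProver

p1 = Expression.fromstring("all x.(man(x) -> mortal(x))")
p2 = Expression.fromstring("man(Socrates)")
goal = Expression.fromstring("mortal(Socrates)")

# prove() returns True when every tableau branch closes; verbose=True also
# prints the proof lines collected by the Debug helper.
print(TableauProver().prove(goal, [p1, p2], verbose=False))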
llmeval-env/lib/python3.10/site-packages/nltk/metrics/__init__.py ADDED
@@ -0,0 +1,51 @@
1
+ # Natural Language Toolkit: Metrics
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ #
9
+
10
+ """
11
+ NLTK Metrics
12
+
13
+ Classes and methods for scoring processing modules.
14
+ """
15
+
16
+ from nltk.metrics.agreement import AnnotationTask
17
+ from nltk.metrics.aline import align
18
+ from nltk.metrics.association import (
19
+ BigramAssocMeasures,
20
+ ContingencyMeasures,
21
+ NgramAssocMeasures,
22
+ QuadgramAssocMeasures,
23
+ TrigramAssocMeasures,
24
+ )
25
+ from nltk.metrics.confusionmatrix import ConfusionMatrix
26
+ from nltk.metrics.distance import (
27
+ binary_distance,
28
+ custom_distance,
29
+ edit_distance,
30
+ edit_distance_align,
31
+ fractional_presence,
32
+ interval_distance,
33
+ jaccard_distance,
34
+ masi_distance,
35
+ presence,
36
+ )
37
+ from nltk.metrics.paice import Paice
38
+ from nltk.metrics.scores import (
39
+ accuracy,
40
+ approxrand,
41
+ f_measure,
42
+ log_likelihood,
43
+ precision,
44
+ recall,
45
+ )
46
+ from nltk.metrics.segmentation import ghd, pk, windowdiff
47
+ from nltk.metrics.spearman import (
48
+ ranks_from_scores,
49
+ ranks_from_sequence,
50
+ spearman_correlation,
51
+ )
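The re-exports above make the most common metrics available at package level; a small illustrative sketch follows (the tag sequences are invented for the example, not taken from the library).

from nltk.metrics import ConfusionMatrix, accuracy, edit_distance

reference = "DET NN VB DET JJ NN".split()
test = "DET VB VB DET NN NN".split()

print(accuracy(reference, test))         # fraction of positions where the two sequences agree
print(edit_distance("rain", "shine"))    # Levenshtein edit distance between two strings
print(ConfusionMatrix(reference, test))  # cross-tabulation of reference vs. test labels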
llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.32 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/confusionmatrix.cpython-310.pyc ADDED
Binary file (12 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/distance.cpython-310.pyc ADDED
Binary file (14.7 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/metrics/agreement.py ADDED
@@ -0,0 +1,465 @@
1
+ # Natural Language Toolkit: Agreement Metrics
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Tom Lippincott <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ #
8
+
9
+ """
10
+ Implementations of inter-annotator agreement coefficients surveyed by Artstein
11
+ and Poesio (2007), Inter-Coder Agreement for Computational Linguistics.
12
+
13
+ An agreement coefficient calculates the amount that annotators agreed on label
14
+ assignments beyond what is expected by chance.
15
+
16
+ In defining the AnnotationTask class, we use naming conventions similar to the
17
+ paper's terminology. There are three types of objects in an annotation task:
18
+
19
+ the coders (variables "c" and "C")
20
+ the items to be annotated (variables "i" and "I")
21
+ the potential categories to be assigned (variables "k" and "K")
22
+
23
+ Additionally, it is often the case that we don't want to treat two different
24
+ labels as complete disagreement, and so the AnnotationTask constructor can also
25
+ take a distance metric as a final argument. Distance metrics are simply
26
+ functions that take two arguments, and return a value between 0.0 and 1.0
27
+ indicating the distance between them. If not supplied, the default is binary
28
+ comparison between the arguments.
29
+
30
+ The simplest way to initialize an AnnotationTask is with a list of triples,
31
+ each containing a coder's assignment for one object in the task:
32
+
33
+ task = AnnotationTask(data=[('c1', '1', 'v1'),('c2', '1', 'v1'),...])
34
+
35
+ Note that the data list needs to contain the same number of triples for each
36
+ individual coder, containing category values for the same set of items.
37
+
38
+ Alpha (Krippendorff 1980)
39
+ Kappa (Cohen 1960)
40
+ S (Bennet, Albert and Goldstein 1954)
41
+ Pi (Scott 1955)
42
+
43
+
44
+ TODO: Describe handling of multiple coders and missing data
45
+
46
+ Expected results from the Artstein and Poesio survey paper:
47
+
48
+ >>> from nltk.metrics.agreement import AnnotationTask
49
+ >>> import os.path
50
+ >>> t = AnnotationTask(data=[x.split() for x in open(os.path.join(os.path.dirname(__file__), "artstein_poesio_example.txt"))])
51
+ >>> t.avg_Ao()
52
+ 0.88
53
+ >>> round(t.pi(), 5)
54
+ 0.79953
55
+ >>> round(t.S(), 2)
56
+ 0.82
57
+
58
+ This would have returned a wrong value (0.0) in @785fb79 as coders are in
59
+ the wrong order. Subsequently, all values for pi(), S(), and kappa() would
60
+ have been wrong as they are computed with avg_Ao().
61
+ >>> t2 = AnnotationTask(data=[('b','1','stat'),('a','1','stat')])
62
+ >>> t2.avg_Ao()
63
+ 1.0
64
+
65
+ The following, of course, also works.
66
+ >>> t3 = AnnotationTask(data=[('a','1','othr'),('b','1','othr')])
67
+ >>> t3.avg_Ao()
68
+ 1.0
69
+
70
+ """
71
+
72
+ import logging
73
+ from itertools import groupby
74
+ from operator import itemgetter
75
+
76
+ from nltk.internals import deprecated
77
+ from nltk.metrics.distance import binary_distance
78
+ from nltk.probability import ConditionalFreqDist, FreqDist
79
+
80
+ log = logging.getLogger(__name__)
81
+
82
+
83
+ class AnnotationTask:
84
+ """Represents an annotation task, i.e. people assign labels to items.
85
+
86
+ Notation tries to match notation in Artstein and Poesio (2007).
87
+
88
+ In general, coders and items can be represented as any hashable object.
89
+ Integers, for example, are fine, though strings are more readable.
90
+ Labels must support the distance functions applied to them, so e.g.
91
+ a string-edit-distance makes no sense if your labels are integers,
92
+ whereas interval distance needs numeric values. A notable case of this
93
+ is the MASI metric, which requires Python sets.
94
+ """
95
+
96
+ def __init__(self, data=None, distance=binary_distance):
97
+ """Initialize an annotation task.
98
+
99
+ The data argument can be None (to create an empty annotation task) or a sequence of 3-tuples,
100
+ each representing a coder's labeling of an item:
101
+ ``(coder,item,label)``
102
+
103
+ The distance argument is a function taking two arguments (labels) and producing a numerical distance.
104
+ The distance from a label to itself should be zero:
105
+ ``distance(l,l) = 0``
106
+ """
107
+ self.distance = distance
108
+ self.I = set()
109
+ self.K = set()
110
+ self.C = set()
111
+ self.data = []
112
+ if data is not None:
113
+ self.load_array(data)
114
+
115
+ def __str__(self):
116
+ return "\r\n".join(
117
+ map(
118
+ lambda x: "%s\t%s\t%s"
119
+ % (x["coder"], x["item"].replace("_", "\t"), ",".join(x["labels"])),
120
+ self.data,
121
+ )
122
+ )
123
+
124
+ def load_array(self, array):
125
+ """Load an sequence of annotation results, appending to any data already loaded.
126
+
127
+ The argument is a sequence of 3-tuples, each representing a coder's labeling of an item:
128
+ (coder,item,label)
129
+ """
130
+ for coder, item, labels in array:
131
+ self.C.add(coder)
132
+ self.K.add(labels)
133
+ self.I.add(item)
134
+ self.data.append({"coder": coder, "labels": labels, "item": item})
135
+
136
+ def agr(self, cA, cB, i, data=None):
137
+ """Agreement between two coders on a given item"""
138
+ data = data or self.data
139
+ # cfedermann: we don't know what combination of coder/item will come
140
+ # first in x; to avoid StopIteration problems due to assuming an order
141
+ # cA,cB, we allow either for k1 and then look up the missing as k2.
142
+ k1 = next(x for x in data if x["coder"] in (cA, cB) and x["item"] == i)
143
+ if k1["coder"] == cA:
144
+ k2 = next(x for x in data if x["coder"] == cB and x["item"] == i)
145
+ else:
146
+ k2 = next(x for x in data if x["coder"] == cA and x["item"] == i)
147
+
148
+ ret = 1.0 - float(self.distance(k1["labels"], k2["labels"]))
149
+ log.debug("Observed agreement between %s and %s on %s: %f", cA, cB, i, ret)
150
+ log.debug(
151
+ 'Distance between "%r" and "%r": %f', k1["labels"], k2["labels"], 1.0 - ret
152
+ )
153
+ return ret
154
+
155
+ def Nk(self, k):
156
+ return float(sum(1 for x in self.data if x["labels"] == k))
157
+
158
+ def Nik(self, i, k):
159
+ return float(sum(1 for x in self.data if x["item"] == i and x["labels"] == k))
160
+
161
+ def Nck(self, c, k):
162
+ return float(sum(1 for x in self.data if x["coder"] == c and x["labels"] == k))
163
+
164
+ @deprecated("Use Nk, Nik or Nck instead")
165
+ def N(self, k=None, i=None, c=None):
166
+ """Implements the "n-notation" used in Artstein and Poesio (2007)"""
167
+ if k is not None and i is None and c is None:
168
+ ret = self.Nk(k)
169
+ elif k is not None and i is not None and c is None:
170
+ ret = self.Nik(i, k)
171
+ elif k is not None and c is not None and i is None:
172
+ ret = self.Nck(c, k)
173
+ else:
174
+ raise ValueError(
175
+ f"You must pass either i or c, not both! (k={k!r},i={i!r},c={c!r})"
176
+ )
177
+ log.debug("Count on N[%s,%s,%s]: %d", k, i, c, ret)
178
+ return ret
179
+
180
+ def _grouped_data(self, field, data=None):
181
+ data = data or self.data
182
+ return groupby(sorted(data, key=itemgetter(field)), itemgetter(field))
183
+
184
+ def Ao(self, cA, cB):
185
+ """Observed agreement between two coders on all items."""
186
+ data = self._grouped_data(
187
+ "item", (x for x in self.data if x["coder"] in (cA, cB))
188
+ )
189
+ ret = sum(self.agr(cA, cB, item, item_data) for item, item_data in data) / len(
190
+ self.I
191
+ )
192
+ log.debug("Observed agreement between %s and %s: %f", cA, cB, ret)
193
+ return ret
194
+
195
+ def _pairwise_average(self, function):
196
+ """
197
+ Calculates the average of function results for each coder pair
198
+ """
199
+ total = 0
200
+ n = 0
201
+ s = self.C.copy()
202
+ for cA in self.C:
203
+ s.remove(cA)
204
+ for cB in s:
205
+ total += function(cA, cB)
206
+ n += 1
207
+ ret = total / n
208
+ return ret
209
+
210
+ def avg_Ao(self):
211
+ """Average observed agreement across all coders and items."""
212
+ ret = self._pairwise_average(self.Ao)
213
+ log.debug("Average observed agreement: %f", ret)
214
+ return ret
215
+
216
+ def Do_Kw_pairwise(self, cA, cB, max_distance=1.0):
217
+ """The observed disagreement for the weighted kappa coefficient."""
218
+ total = 0.0
219
+ data = (x for x in self.data if x["coder"] in (cA, cB))
220
+ for i, itemdata in self._grouped_data("item", data):
221
+ # we should have two items; distance doesn't care which comes first
222
+ total += self.distance(next(itemdata)["labels"], next(itemdata)["labels"])
223
+
224
+ ret = total / (len(self.I) * max_distance)
225
+ log.debug("Observed disagreement between %s and %s: %f", cA, cB, ret)
226
+ return ret
227
+
228
+ def Do_Kw(self, max_distance=1.0):
229
+ """Averaged over all labelers"""
230
+ ret = self._pairwise_average(
231
+ lambda cA, cB: self.Do_Kw_pairwise(cA, cB, max_distance)
232
+ )
233
+ log.debug("Observed disagreement: %f", ret)
234
+ return ret
235
+
236
+ # Agreement Coefficients
237
+ def S(self):
238
+ """Bennett, Albert and Goldstein 1954"""
239
+ Ae = 1.0 / len(self.K)
240
+ ret = (self.avg_Ao() - Ae) / (1.0 - Ae)
241
+ return ret
242
+
243
+ def pi(self):
244
+ """Scott 1955; here, multi-pi.
245
+ Equivalent to K from Siegel and Castellan (1988).
246
+
247
+ """
248
+ total = 0.0
249
+ label_freqs = FreqDist(x["labels"] for x in self.data)
250
+ for k, f in label_freqs.items():
251
+ total += f**2
252
+ Ae = total / ((len(self.I) * len(self.C)) ** 2)
253
+ return (self.avg_Ao() - Ae) / (1 - Ae)
254
+
255
+ def Ae_kappa(self, cA, cB):
256
+ Ae = 0.0
257
+ nitems = float(len(self.I))
258
+ label_freqs = ConditionalFreqDist((x["labels"], x["coder"]) for x in self.data)
259
+ for k in label_freqs.conditions():
260
+ Ae += (label_freqs[k][cA] / nitems) * (label_freqs[k][cB] / nitems)
261
+ return Ae
262
+
263
+ def kappa_pairwise(self, cA, cB):
264
+ """ """
265
+ Ae = self.Ae_kappa(cA, cB)
266
+ ret = (self.Ao(cA, cB) - Ae) / (1.0 - Ae)
267
+ log.debug("Expected agreement between %s and %s: %f", cA, cB, Ae)
268
+ return ret
269
+
270
+ def kappa(self):
271
+ """Cohen 1960
272
+ Averages naively over kappas for each coder pair.
273
+
274
+ """
275
+ return self._pairwise_average(self.kappa_pairwise)
276
+
277
+ def multi_kappa(self):
278
+ """Davies and Fleiss 1982
279
+ Averages over observed and expected agreements for each coder pair.
280
+
281
+ """
282
+ Ae = self._pairwise_average(self.Ae_kappa)
283
+ return (self.avg_Ao() - Ae) / (1.0 - Ae)
284
+
285
+ def Disagreement(self, label_freqs):
286
+ total_labels = sum(label_freqs.values())
287
+ pairs = 0.0
288
+ for j, nj in label_freqs.items():
289
+ for l, nl in label_freqs.items():
290
+ pairs += float(nj * nl) * self.distance(l, j)
291
+ return 1.0 * pairs / (total_labels * (total_labels - 1))
292
+
293
+ def alpha(self):
294
+ """Krippendorff 1980"""
295
+ # check for degenerate cases
296
+ if len(self.K) == 0:
297
+ raise ValueError("Cannot calculate alpha, no data present!")
298
+ if len(self.K) == 1:
299
+ log.debug("Only one annotation value, alpha returning 1.")
300
+ return 1
301
+ if len(self.C) == 1 and len(self.I) == 1:
302
+ raise ValueError("Cannot calculate alpha, only one coder and item present!")
303
+
304
+ total_disagreement = 0.0
305
+ total_ratings = 0
306
+ all_valid_labels_freq = FreqDist([])
307
+
308
+ total_do = 0.0 # Total observed disagreement for all items.
309
+ for i, itemdata in self._grouped_data("item"):
310
+ label_freqs = FreqDist(x["labels"] for x in itemdata)
311
+ labels_count = sum(label_freqs.values())
312
+ if labels_count < 2:
313
+ # Ignore the item.
314
+ continue
315
+ all_valid_labels_freq += label_freqs
316
+ total_do += self.Disagreement(label_freqs) * labels_count
317
+
318
+ do = total_do / sum(all_valid_labels_freq.values())
319
+
320
+ de = self.Disagreement(all_valid_labels_freq) # Expected disagreement.
321
+ k_alpha = 1.0 - do / de
322
+
323
+ return k_alpha
324
+
325
+ def weighted_kappa_pairwise(self, cA, cB, max_distance=1.0):
326
+ """Cohen 1968"""
327
+ total = 0.0
328
+ label_freqs = ConditionalFreqDist(
329
+ (x["coder"], x["labels"]) for x in self.data if x["coder"] in (cA, cB)
330
+ )
331
+ for j in self.K:
332
+ for l in self.K:
333
+ total += label_freqs[cA][j] * label_freqs[cB][l] * self.distance(j, l)
334
+ De = total / (max_distance * pow(len(self.I), 2))
335
+ log.debug("Expected disagreement between %s and %s: %f", cA, cB, De)
336
+ Do = self.Do_Kw_pairwise(cA, cB)
337
+ ret = 1.0 - (Do / De)
338
+ return ret
339
+
340
+ def weighted_kappa(self, max_distance=1.0):
341
+ """Cohen 1968"""
342
+ return self._pairwise_average(
343
+ lambda cA, cB: self.weighted_kappa_pairwise(cA, cB, max_distance)
344
+ )
345
+
346
+
347
+ if __name__ == "__main__":
348
+
349
+ import optparse
350
+ import re
351
+
352
+ from nltk.metrics import distance
353
+
354
+ # process command-line arguments
355
+ parser = optparse.OptionParser()
356
+ parser.add_option(
357
+ "-d",
358
+ "--distance",
359
+ dest="distance",
360
+ default="binary_distance",
361
+ help="distance metric to use",
362
+ )
363
+ parser.add_option(
364
+ "-a",
365
+ "--agreement",
366
+ dest="agreement",
367
+ default="kappa",
368
+ help="agreement coefficient to calculate",
369
+ )
370
+ parser.add_option(
371
+ "-e",
372
+ "--exclude",
373
+ dest="exclude",
374
+ action="append",
375
+ default=[],
376
+ help="coder names to exclude (may be specified multiple times)",
377
+ )
378
+ parser.add_option(
379
+ "-i",
380
+ "--include",
381
+ dest="include",
382
+ action="append",
383
+ default=[],
384
+ help="coder names to include, same format as exclude",
385
+ )
386
+ parser.add_option(
387
+ "-f",
388
+ "--file",
389
+ dest="file",
390
+ help="file to read labelings from, each line with three columns: 'labeler item labels'",
391
+ )
392
+ parser.add_option(
393
+ "-v",
394
+ "--verbose",
395
+ dest="verbose",
396
+ default="0",
397
+ help="how much debugging to print on stderr (0-4)",
398
+ )
399
+ parser.add_option(
400
+ "-c",
401
+ "--columnsep",
402
+ dest="columnsep",
403
+ default="\t",
404
+ help="char/string that separates the three columns in the file, defaults to tab",
405
+ )
406
+ parser.add_option(
407
+ "-l",
408
+ "--labelsep",
409
+ dest="labelsep",
410
+ default=",",
411
+ help="char/string that separates labels (if labelers can assign more than one), defaults to comma",
412
+ )
413
+ parser.add_option(
414
+ "-p",
415
+ "--presence",
416
+ dest="presence",
417
+ default=None,
418
+ help="convert each labeling into 1 or 0, based on presence of LABEL",
419
+ )
420
+ parser.add_option(
421
+ "-T",
422
+ "--thorough",
423
+ dest="thorough",
424
+ default=False,
425
+ action="store_true",
426
+ help="calculate agreement for every subset of the annotators",
427
+ )
428
+ (options, remainder) = parser.parse_args()
429
+
430
+ if not options.file:
431
+ parser.print_help()
432
+ exit()
433
+
434
+ logging.basicConfig(level=50 - 10 * int(options.verbose))
435
+
436
+ # read in data from the specified file
437
+ data = []
438
+ with open(options.file) as infile:
439
+ for l in infile:
440
+ toks = l.split(options.columnsep)
441
+ coder, object_, labels = (
442
+ toks[0],
443
+ str(toks[1:-1]),
444
+ frozenset(toks[-1].strip().split(options.labelsep)),
445
+ )
446
+ if (
447
+ (options.include == options.exclude)
448
+ or (len(options.include) > 0 and coder in options.include)
449
+ or (len(options.exclude) > 0 and coder not in options.exclude)
450
+ ):
451
+ data.append((coder, object_, labels))
452
+
453
+ if options.presence:
454
+ task = AnnotationTask(
455
+ data, getattr(distance, options.distance)(options.presence)
456
+ )
457
+ else:
458
+ task = AnnotationTask(data, getattr(distance, options.distance))
459
+
460
+ if options.thorough:
461
+ pass
462
+ else:
463
+ print(getattr(task, options.agreement)())
464
+
465
+ logging.shutdown()
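To complement the module docstring above, a minimal sketch of computing the agreement coefficients on a toy task; the coder/item/label triples are invented for illustration, and the default binary_distance is used.

from nltk.metrics.agreement import AnnotationTask

data = [
    ("c1", "item1", "yes"), ("c2", "item1", "yes"), ("c3", "item1", "no"),
    ("c1", "item2", "no"),  ("c2", "item2", "no"),  ("c3", "item2", "no"),
]
task = AnnotationTask(data=data)  # (coder, item, label) triples, one per labeling

print(task.avg_Ao())  # average observed pairwise agreement
print(task.S())       # Bennett, Albert and Goldstein's S
print(task.pi())      # Scott's pi (multi-pi)
print(task.kappa())   # Cohen's kappa, averaged over coder pairs
print(task.alpha())   # Krippendorff's alpha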
llmeval-env/lib/python3.10/site-packages/nltk/metrics/aline.py ADDED
@@ -0,0 +1,1354 @@
1
+ # Natural Language Toolkit: ALINE
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Greg Kondrak <[email protected]>
5
+ # Geoff Bacon <[email protected]> (Python port)
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ ALINE
11
+ https://webdocs.cs.ualberta.ca/~kondrak/
12
+ Copyright 2002 by Grzegorz Kondrak.
13
+
14
+ ALINE is an algorithm for aligning phonetic sequences, described in [1].
15
+ This module is a port of Kondrak's (2002) ALINE. It provides functions for
16
+ phonetic sequence alignment and similarity analysis. These are useful in
17
+ historical linguistics, sociolinguistics and synchronic phonology.
18
+
19
+ ALINE has parameters that can be tuned for desired output. These parameters are:
20
+ - C_skip, C_sub, C_exp, C_vwl
21
+ - Salience weights
22
+ - Segmental features
23
+
24
+ In this implementation, some parameters have been changed from their default
25
+ values as described in [1], in order to replicate published results. All changes
26
+ are noted in comments.
27
+
28
+ Example usage
29
+ -------------
30
+
31
+ # Get optimal alignment of two phonetic sequences
32
+
33
+ >>> align('θin', 'tenwis') # doctest: +SKIP
34
+ [[('θ', 't'), ('i', 'e'), ('n', 'n'), ('-', 'w'), ('-', 'i'), ('-', 's')]]
35
+
36
+ [1] G. Kondrak. Algorithms for Language Reconstruction. PhD dissertation,
37
+ University of Toronto.
38
+ """
39
+
40
+ try:
41
+ import numpy as np
42
+ except ImportError:
43
+ np = None
44
+
45
+ # === Constants ===
46
+
47
+ inf = float("inf")
48
+
49
+ # Default values for maximum similarity scores (Kondrak 2002: 54)
50
+ C_skip = -10 # Indels
51
+ C_sub = 35 # Substitutions
52
+ C_exp = 45 # Expansions/compressions
53
+ C_vwl = 5 # Vowel/consonant relative weight (decreased from 10)
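These maximum-score constants (together with the salience weights further down) are module-level globals that the scoring functions read at call time, so they can be adjusted before calling align(). A brief sketch with an illustrative, not recommended, value:

    from nltk.metrics import aline

    aline.C_skip = -5                       # make indels cheaper than the default -10
    print(aline.align('θin', 'tenwis')[0])  # alignment under the adjusted penalty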
54
+
55
+ consonants = [
56
+ "B",
57
+ "N",
58
+ "R",
59
+ "b",
60
+ "c",
61
+ "d",
62
+ "f",
63
+ "g",
64
+ "h",
65
+ "j",
66
+ "k",
67
+ "l",
68
+ "m",
69
+ "n",
70
+ "p",
71
+ "q",
72
+ "r",
73
+ "s",
74
+ "t",
75
+ "v",
76
+ "x",
77
+ "z",
78
+ "ç",
79
+ "ð",
80
+ "ħ",
81
+ "ŋ",
82
+ "ɖ",
83
+ "ɟ",
84
+ "ɢ",
85
+ "ɣ",
86
+ "ɦ",
87
+ "ɬ",
88
+ "ɮ",
89
+ "ɰ",
90
+ "ɱ",
91
+ "ɲ",
92
+ "ɳ",
93
+ "ɴ",
94
+ "ɸ",
95
+ "ɹ",
96
+ "ɻ",
97
+ "ɽ",
98
+ "ɾ",
99
+ "ʀ",
100
+ "ʁ",
101
+ "ʂ",
102
+ "ʃ",
103
+ "ʈ",
104
+ "ʋ",
105
+ "ʐ ",
106
+ "ʒ",
107
+ "ʔ",
108
+ "ʕ",
109
+ "ʙ",
110
+ "ʝ",
111
+ "β",
112
+ "θ",
113
+ "χ",
114
+ "ʐ",
115
+ "w",
116
+ ]
117
+
118
+ # Relevant features for comparing consonants and vowels
119
+ R_c = [
120
+ "aspirated",
121
+ "lateral",
122
+ "manner",
123
+ "nasal",
124
+ "place",
125
+ "retroflex",
126
+ "syllabic",
127
+ "voice",
128
+ ]
129
+ # 'high' taken out of R_v because same as manner
130
+ R_v = [
131
+ "back",
132
+ "lateral",
133
+ "long",
134
+ "manner",
135
+ "nasal",
136
+ "place",
137
+ "retroflex",
138
+ "round",
139
+ "syllabic",
140
+ "voice",
141
+ ]
142
+
143
+ # Flattened feature matrix (Kondrak 2002: 56)
144
+ similarity_matrix = {
145
+ # place
146
+ "bilabial": 1.0,
147
+ "labiodental": 0.95,
148
+ "dental": 0.9,
149
+ "alveolar": 0.85,
150
+ "retroflex": 0.8,
151
+ "palato-alveolar": 0.75,
152
+ "palatal": 0.7,
153
+ "velar": 0.6,
154
+ "uvular": 0.5,
155
+ "pharyngeal": 0.3,
156
+ "glottal": 0.1,
157
+ "labiovelar": 1.0,
158
+ "vowel": -1.0, # added 'vowel'
159
+ # manner
160
+ "stop": 1.0,
161
+ "affricate": 0.9,
162
+ "fricative": 0.85, # increased fricative from 0.8
163
+ "trill": 0.7,
164
+ "tap": 0.65,
165
+ "approximant": 0.6,
166
+ "high vowel": 0.4,
167
+ "mid vowel": 0.2,
168
+ "low vowel": 0.0,
169
+ "vowel2": 0.5, # added vowel
170
+ # high
171
+ "high": 1.0,
172
+ "mid": 0.5,
173
+ "low": 0.0,
174
+ # back
175
+ "front": 1.0,
176
+ "central": 0.5,
177
+ "back": 0.0,
178
+ # binary features
179
+ "plus": 1.0,
180
+ "minus": 0.0,
181
+ }
182
+
183
+ # Relative weights of phonetic features (Kondrak 2002: 55)
184
+ salience = {
185
+ "syllabic": 5,
186
+ "place": 40,
187
+ "manner": 50,
188
+ "voice": 5, # decreased from 10
189
+ "nasal": 20, # increased from 10
190
+ "retroflex": 10,
191
+ "lateral": 10,
192
+ "aspirated": 5,
193
+ "long": 0, # decreased from 1
194
+ "high": 3, # decreased from 5
195
+ "back": 2, # decreased from 5
196
+ "round": 2, # decreased from 5
197
+ }
198
+
199
+ # (Kondrak 2002: 59-60)
200
+ feature_matrix = {
201
+ # Consonants
202
+ "p": {
203
+ "place": "bilabial",
204
+ "manner": "stop",
205
+ "syllabic": "minus",
206
+ "voice": "minus",
207
+ "nasal": "minus",
208
+ "retroflex": "minus",
209
+ "lateral": "minus",
210
+ "aspirated": "minus",
211
+ },
212
+ "b": {
213
+ "place": "bilabial",
214
+ "manner": "stop",
215
+ "syllabic": "minus",
216
+ "voice": "plus",
217
+ "nasal": "minus",
218
+ "retroflex": "minus",
219
+ "lateral": "minus",
220
+ "aspirated": "minus",
221
+ },
222
+ "t": {
223
+ "place": "alveolar",
224
+ "manner": "stop",
225
+ "syllabic": "minus",
226
+ "voice": "minus",
227
+ "nasal": "minus",
228
+ "retroflex": "minus",
229
+ "lateral": "minus",
230
+ "aspirated": "minus",
231
+ },
232
+ "d": {
233
+ "place": "alveolar",
234
+ "manner": "stop",
235
+ "syllabic": "minus",
236
+ "voice": "plus",
237
+ "nasal": "minus",
238
+ "retroflex": "minus",
239
+ "lateral": "minus",
240
+ "aspirated": "minus",
241
+ },
242
+ "ʈ": {
243
+ "place": "retroflex",
244
+ "manner": "stop",
245
+ "syllabic": "minus",
246
+ "voice": "minus",
247
+ "nasal": "minus",
248
+ "retroflex": "plus",
249
+ "lateral": "minus",
250
+ "aspirated": "minus",
251
+ },
252
+ "ɖ": {
253
+ "place": "retroflex",
254
+ "manner": "stop",
255
+ "syllabic": "minus",
256
+ "voice": "plus",
257
+ "nasal": "minus",
258
+ "retroflex": "plus",
259
+ "lateral": "minus",
260
+ "aspirated": "minus",
261
+ },
262
+ "c": {
263
+ "place": "palatal",
264
+ "manner": "stop",
265
+ "syllabic": "minus",
266
+ "voice": "minus",
267
+ "nasal": "minus",
268
+ "retroflex": "minus",
269
+ "lateral": "minus",
270
+ "aspirated": "minus",
271
+ },
272
+ "ɟ": {
273
+ "place": "palatal",
274
+ "manner": "stop",
275
+ "syllabic": "minus",
276
+ "voice": "plus",
277
+ "nasal": "minus",
278
+ "retroflex": "minus",
279
+ "lateral": "minus",
280
+ "aspirated": "minus",
281
+ },
282
+ "k": {
283
+ "place": "velar",
284
+ "manner": "stop",
285
+ "syllabic": "minus",
286
+ "voice": "minus",
287
+ "nasal": "minus",
288
+ "retroflex": "minus",
289
+ "lateral": "minus",
290
+ "aspirated": "minus",
291
+ },
292
+ "g": {
293
+ "place": "velar",
294
+ "manner": "stop",
295
+ "syllabic": "minus",
296
+ "voice": "plus",
297
+ "nasal": "minus",
298
+ "retroflex": "minus",
299
+ "lateral": "minus",
300
+ "aspirated": "minus",
301
+ },
302
+ "q": {
303
+ "place": "uvular",
304
+ "manner": "stop",
305
+ "syllabic": "minus",
306
+ "voice": "minus",
307
+ "nasal": "minus",
308
+ "retroflex": "minus",
309
+ "lateral": "minus",
310
+ "aspirated": "minus",
311
+ },
312
+ "ɢ": {
313
+ "place": "uvular",
314
+ "manner": "stop",
315
+ "syllabic": "minus",
316
+ "voice": "plus",
317
+ "nasal": "minus",
318
+ "retroflex": "minus",
319
+ "lateral": "minus",
320
+ "aspirated": "minus",
321
+ },
322
+ "ʔ": {
323
+ "place": "glottal",
324
+ "manner": "stop",
325
+ "syllabic": "minus",
326
+ "voice": "minus",
327
+ "nasal": "minus",
328
+ "retroflex": "minus",
329
+ "lateral": "minus",
330
+ "aspirated": "minus",
331
+ },
332
+ "m": {
333
+ "place": "bilabial",
334
+ "manner": "stop",
335
+ "syllabic": "minus",
336
+ "voice": "plus",
337
+ "nasal": "plus",
338
+ "retroflex": "minus",
339
+ "lateral": "minus",
340
+ "aspirated": "minus",
341
+ },
342
+ "ɱ": {
343
+ "place": "labiodental",
344
+ "manner": "stop",
345
+ "syllabic": "minus",
346
+ "voice": "plus",
347
+ "nasal": "plus",
348
+ "retroflex": "minus",
349
+ "lateral": "minus",
350
+ "aspirated": "minus",
351
+ },
352
+ "n": {
353
+ "place": "alveolar",
354
+ "manner": "stop",
355
+ "syllabic": "minus",
356
+ "voice": "plus",
357
+ "nasal": "plus",
358
+ "retroflex": "minus",
359
+ "lateral": "minus",
360
+ "aspirated": "minus",
361
+ },
362
+ "ɳ": {
363
+ "place": "retroflex",
364
+ "manner": "stop",
365
+ "syllabic": "minus",
366
+ "voice": "plus",
367
+ "nasal": "plus",
368
+ "retroflex": "plus",
369
+ "lateral": "minus",
370
+ "aspirated": "minus",
371
+ },
372
+ "ɲ": {
373
+ "place": "palatal",
374
+ "manner": "stop",
375
+ "syllabic": "minus",
376
+ "voice": "plus",
377
+ "nasal": "plus",
378
+ "retroflex": "minus",
379
+ "lateral": "minus",
380
+ "aspirated": "minus",
381
+ },
382
+ "ŋ": {
383
+ "place": "velar",
384
+ "manner": "stop",
385
+ "syllabic": "minus",
386
+ "voice": "plus",
387
+ "nasal": "plus",
388
+ "retroflex": "minus",
389
+ "lateral": "minus",
390
+ "aspirated": "minus",
391
+ },
392
+ "ɴ": {
393
+ "place": "uvular",
394
+ "manner": "stop",
395
+ "syllabic": "minus",
396
+ "voice": "plus",
397
+ "nasal": "plus",
398
+ "retroflex": "minus",
399
+ "lateral": "minus",
400
+ "aspirated": "minus",
401
+ },
402
+ "N": {
403
+ "place": "uvular",
404
+ "manner": "stop",
405
+ "syllabic": "minus",
406
+ "voice": "plus",
407
+ "nasal": "plus",
408
+ "retroflex": "minus",
409
+ "lateral": "minus",
410
+ "aspirated": "minus",
411
+ },
412
+ "ʙ": {
413
+ "place": "bilabial",
414
+ "manner": "trill",
415
+ "syllabic": "minus",
416
+ "voice": "plus",
417
+ "nasal": "minus",
418
+ "retroflex": "minus",
419
+ "lateral": "minus",
420
+ "aspirated": "minus",
421
+ },
422
+ "B": {
423
+ "place": "bilabial",
424
+ "manner": "trill",
425
+ "syllabic": "minus",
426
+ "voice": "plus",
427
+ "nasal": "minus",
428
+ "retroflex": "minus",
429
+ "lateral": "minus",
430
+ "aspirated": "minus",
431
+ },
432
+ "r": {
433
+ "place": "alveolar",
434
+ "manner": "trill",
435
+ "syllabic": "minus",
436
+ "voice": "plus",
437
+ "nasal": "minus",
438
+ "retroflex": "plus",
439
+ "lateral": "minus",
440
+ "aspirated": "minus",
441
+ },
442
+ "ʀ": {
443
+ "place": "uvular",
444
+ "manner": "trill",
445
+ "syllabic": "minus",
446
+ "voice": "plus",
447
+ "nasal": "minus",
448
+ "retroflex": "minus",
449
+ "lateral": "minus",
450
+ "aspirated": "minus",
451
+ },
452
+ "R": {
453
+ "place": "uvular",
454
+ "manner": "trill",
455
+ "syllabic": "minus",
456
+ "voice": "plus",
457
+ "nasal": "minus",
458
+ "retroflex": "minus",
459
+ "lateral": "minus",
460
+ "aspirated": "minus",
461
+ },
462
+ "ɾ": {
463
+ "place": "alveolar",
464
+ "manner": "tap",
465
+ "syllabic": "minus",
466
+ "voice": "plus",
467
+ "nasal": "minus",
468
+ "retroflex": "minus",
469
+ "lateral": "minus",
470
+ "aspirated": "minus",
471
+ },
472
+ "ɽ": {
473
+ "place": "retroflex",
474
+ "manner": "tap",
475
+ "syllabic": "minus",
476
+ "voice": "plus",
477
+ "nasal": "minus",
478
+ "retroflex": "plus",
479
+ "lateral": "minus",
480
+ "aspirated": "minus",
481
+ },
482
+ "ɸ": {
483
+ "place": "bilabial",
484
+ "manner": "fricative",
485
+ "syllabic": "minus",
486
+ "voice": "minus",
487
+ "nasal": "minus",
488
+ "retroflex": "minus",
489
+ "lateral": "minus",
490
+ "aspirated": "minus",
491
+ },
492
+ "β": {
493
+ "place": "bilabial",
494
+ "manner": "fricative",
495
+ "syllabic": "minus",
496
+ "voice": "plus",
497
+ "nasal": "minus",
498
+ "retroflex": "minus",
499
+ "lateral": "minus",
500
+ "aspirated": "minus",
501
+ },
502
+ "f": {
503
+ "place": "labiodental",
504
+ "manner": "fricative",
505
+ "syllabic": "minus",
506
+ "voice": "minus",
507
+ "nasal": "minus",
508
+ "retroflex": "minus",
509
+ "lateral": "minus",
510
+ "aspirated": "minus",
511
+ },
512
+ "v": {
513
+ "place": "labiodental",
514
+ "manner": "fricative",
515
+ "syllabic": "minus",
516
+ "voice": "plus",
517
+ "nasal": "minus",
518
+ "retroflex": "minus",
519
+ "lateral": "minus",
520
+ "aspirated": "minus",
521
+ },
522
+ "θ": {
523
+ "place": "dental",
524
+ "manner": "fricative",
525
+ "syllabic": "minus",
526
+ "voice": "minus",
527
+ "nasal": "minus",
528
+ "retroflex": "minus",
529
+ "lateral": "minus",
530
+ "aspirated": "minus",
531
+ },
532
+ "ð": {
533
+ "place": "dental",
534
+ "manner": "fricative",
535
+ "syllabic": "minus",
536
+ "voice": "plus",
537
+ "nasal": "minus",
538
+ "retroflex": "minus",
539
+ "lateral": "minus",
540
+ "aspirated": "minus",
541
+ },
542
+ "s": {
543
+ "place": "alveolar",
544
+ "manner": "fricative",
545
+ "syllabic": "minus",
546
+ "voice": "minus",
547
+ "nasal": "minus",
548
+ "retroflex": "minus",
549
+ "lateral": "minus",
550
+ "aspirated": "minus",
551
+ },
552
+ "z": {
553
+ "place": "alveolar",
554
+ "manner": "fricative",
555
+ "syllabic": "minus",
556
+ "voice": "plus",
557
+ "nasal": "minus",
558
+ "retroflex": "minus",
559
+ "lateral": "minus",
560
+ "aspirated": "minus",
561
+ },
562
+ "ʃ": {
563
+ "place": "palato-alveolar",
564
+ "manner": "fricative",
565
+ "syllabic": "minus",
566
+ "voice": "minus",
567
+ "nasal": "minus",
568
+ "retroflex": "minus",
569
+ "lateral": "minus",
570
+ "aspirated": "minus",
571
+ },
572
+ "ʒ": {
573
+ "place": "palato-alveolar",
574
+ "manner": "fricative",
575
+ "syllabic": "minus",
576
+ "voice": "plus",
577
+ "nasal": "minus",
578
+ "retroflex": "minus",
579
+ "lateral": "minus",
580
+ "aspirated": "minus",
581
+ },
582
+ "ʂ": {
583
+ "place": "retroflex",
584
+ "manner": "fricative",
585
+ "syllabic": "minus",
586
+ "voice": "minus",
587
+ "nasal": "minus",
588
+ "retroflex": "plus",
589
+ "lateral": "minus",
590
+ "aspirated": "minus",
591
+ },
592
+ "ʐ": {
593
+ "place": "retroflex",
594
+ "manner": "fricative",
595
+ "syllabic": "minus",
596
+ "voice": "plus",
597
+ "nasal": "minus",
598
+ "retroflex": "plus",
599
+ "lateral": "minus",
600
+ "aspirated": "minus",
601
+ },
602
+ "ç": {
603
+ "place": "palatal",
604
+ "manner": "fricative",
605
+ "syllabic": "minus",
606
+ "voice": "minus",
607
+ "nasal": "minus",
608
+ "retroflex": "minus",
609
+ "lateral": "minus",
610
+ "aspirated": "minus",
611
+ },
612
+ "ʝ": {
613
+ "place": "palatal",
614
+ "manner": "fricative",
615
+ "syllabic": "minus",
616
+ "voice": "plus",
617
+ "nasal": "minus",
618
+ "retroflex": "minus",
619
+ "lateral": "minus",
620
+ "aspirated": "minus",
621
+ },
622
+ "x": {
623
+ "place": "velar",
624
+ "manner": "fricative",
625
+ "syllabic": "minus",
626
+ "voice": "minus",
627
+ "nasal": "minus",
628
+ "retroflex": "minus",
629
+ "lateral": "minus",
630
+ "aspirated": "minus",
631
+ },
632
+ "ɣ": {
633
+ "place": "velar",
634
+ "manner": "fricative",
635
+ "syllabic": "minus",
636
+ "voice": "plus",
637
+ "nasal": "minus",
638
+ "retroflex": "minus",
639
+ "lateral": "minus",
640
+ "aspirated": "minus",
641
+ },
642
+ "χ": {
643
+ "place": "uvular",
644
+ "manner": "fricative",
645
+ "syllabic": "minus",
646
+ "voice": "minus",
647
+ "nasal": "minus",
648
+ "retroflex": "minus",
649
+ "lateral": "minus",
650
+ "aspirated": "minus",
651
+ },
652
+ "ʁ": {
653
+ "place": "uvular",
654
+ "manner": "fricative",
655
+ "syllabic": "minus",
656
+ "voice": "plus",
657
+ "nasal": "minus",
658
+ "retroflex": "minus",
659
+ "lateral": "minus",
660
+ "aspirated": "minus",
661
+ },
662
+ "ħ": {
663
+ "place": "pharyngeal",
664
+ "manner": "fricative",
665
+ "syllabic": "minus",
666
+ "voice": "minus",
667
+ "nasal": "minus",
668
+ "retroflex": "minus",
669
+ "lateral": "minus",
670
+ "aspirated": "minus",
671
+ },
672
+ "ʕ": {
673
+ "place": "pharyngeal",
674
+ "manner": "fricative",
675
+ "syllabic": "minus",
676
+ "voice": "plus",
677
+ "nasal": "minus",
678
+ "retroflex": "minus",
679
+ "lateral": "minus",
680
+ "aspirated": "minus",
681
+ },
682
+ "h": {
683
+ "place": "glottal",
684
+ "manner": "fricative",
685
+ "syllabic": "minus",
686
+ "voice": "minus",
687
+ "nasal": "minus",
688
+ "retroflex": "minus",
689
+ "lateral": "minus",
690
+ "aspirated": "minus",
691
+ },
692
+ "ɦ": {
693
+ "place": "glottal",
694
+ "manner": "fricative",
695
+ "syllabic": "minus",
696
+ "voice": "plus",
697
+ "nasal": "minus",
698
+ "retroflex": "minus",
699
+ "lateral": "minus",
700
+ "aspirated": "minus",
701
+ },
702
+ "ɬ": {
703
+ "place": "alveolar",
704
+ "manner": "fricative",
705
+ "syllabic": "minus",
706
+ "voice": "minus",
707
+ "nasal": "minus",
708
+ "retroflex": "minus",
709
+ "lateral": "plus",
710
+ "aspirated": "minus",
711
+ },
712
+ "ɮ": {
713
+ "place": "alveolar",
714
+ "manner": "fricative",
715
+ "syllabic": "minus",
716
+ "voice": "plus",
717
+ "nasal": "minus",
718
+ "retroflex": "minus",
719
+ "lateral": "plus",
720
+ "aspirated": "minus",
721
+ },
722
+ "ʋ": {
723
+ "place": "labiodental",
724
+ "manner": "approximant",
725
+ "syllabic": "minus",
726
+ "voice": "plus",
727
+ "nasal": "minus",
728
+ "retroflex": "minus",
729
+ "lateral": "minus",
730
+ "aspirated": "minus",
731
+ },
732
+ "ɹ": {
733
+ "place": "alveolar",
734
+ "manner": "approximant",
735
+ "syllabic": "minus",
736
+ "voice": "plus",
737
+ "nasal": "minus",
738
+ "retroflex": "minus",
739
+ "lateral": "minus",
740
+ "aspirated": "minus",
741
+ },
742
+ "ɻ": {
743
+ "place": "retroflex",
744
+ "manner": "approximant",
745
+ "syllabic": "minus",
746
+ "voice": "plus",
747
+ "nasal": "minus",
748
+ "retroflex": "plus",
749
+ "lateral": "minus",
750
+ "aspirated": "minus",
751
+ },
752
+ "j": {
753
+ "place": "palatal",
754
+ "manner": "approximant",
755
+ "syllabic": "minus",
756
+ "voice": "plus",
757
+ "nasal": "minus",
758
+ "retroflex": "minus",
759
+ "lateral": "minus",
760
+ "aspirated": "minus",
761
+ },
762
+ "ɰ": {
763
+ "place": "velar",
764
+ "manner": "approximant",
765
+ "syllabic": "minus",
766
+ "voice": "plus",
767
+ "nasal": "minus",
768
+ "retroflex": "minus",
769
+ "lateral": "minus",
770
+ "aspirated": "minus",
771
+ },
772
+ "l": {
773
+ "place": "alveolar",
774
+ "manner": "approximant",
775
+ "syllabic": "minus",
776
+ "voice": "plus",
777
+ "nasal": "minus",
778
+ "retroflex": "minus",
779
+ "lateral": "plus",
780
+ "aspirated": "minus",
781
+ },
782
+ "w": {
783
+ "place": "labiovelar",
784
+ "manner": "approximant",
785
+ "syllabic": "minus",
786
+ "voice": "plus",
787
+ "nasal": "minus",
788
+ "retroflex": "minus",
789
+ "lateral": "minus",
790
+ "aspirated": "minus",
791
+ },
792
+ # Vowels
793
+ "i": {
794
+ "place": "vowel",
795
+ "manner": "vowel2",
796
+ "syllabic": "plus",
797
+ "voice": "plus",
798
+ "nasal": "minus",
799
+ "retroflex": "minus",
800
+ "lateral": "minus",
801
+ "high": "high",
802
+ "back": "front",
803
+ "round": "minus",
804
+ "long": "minus",
805
+ "aspirated": "minus",
806
+ },
807
+ "y": {
808
+ "place": "vowel",
809
+ "manner": "vowel2",
810
+ "syllabic": "plus",
811
+ "voice": "plus",
812
+ "nasal": "minus",
813
+ "retroflex": "minus",
814
+ "lateral": "minus",
815
+ "high": "high",
816
+ "back": "front",
817
+ "round": "plus",
818
+ "long": "minus",
819
+ "aspirated": "minus",
820
+ },
821
+ "e": {
822
+ "place": "vowel",
823
+ "manner": "vowel2",
824
+ "syllabic": "plus",
825
+ "voice": "plus",
826
+ "nasal": "minus",
827
+ "retroflex": "minus",
828
+ "lateral": "minus",
829
+ "high": "mid",
830
+ "back": "front",
831
+ "round": "minus",
832
+ "long": "minus",
833
+ "aspirated": "minus",
834
+ },
835
+ "E": {
836
+ "place": "vowel",
837
+ "manner": "vowel2",
838
+ "syllabic": "plus",
839
+ "voice": "plus",
840
+ "nasal": "minus",
841
+ "retroflex": "minus",
842
+ "lateral": "minus",
843
+ "high": "mid",
844
+ "back": "front",
845
+ "round": "minus",
846
+ "long": "plus",
847
+ "aspirated": "minus",
848
+ },
849
+ "ø": {
850
+ "place": "vowel",
851
+ "manner": "vowel2",
852
+ "syllabic": "plus",
853
+ "voice": "plus",
854
+ "nasal": "minus",
855
+ "retroflex": "minus",
856
+ "lateral": "minus",
857
+ "high": "mid",
858
+ "back": "front",
859
+ "round": "plus",
860
+ "long": "minus",
861
+ "aspirated": "minus",
862
+ },
863
+ "ɛ": {
864
+ "place": "vowel",
865
+ "manner": "vowel2",
866
+ "syllabic": "plus",
867
+ "voice": "plus",
868
+ "nasal": "minus",
869
+ "retroflex": "minus",
870
+ "lateral": "minus",
871
+ "high": "mid",
872
+ "back": "front",
873
+ "round": "minus",
874
+ "long": "minus",
875
+ "aspirated": "minus",
876
+ },
877
+ "œ": {
878
+ "place": "vowel",
879
+ "manner": "vowel2",
880
+ "syllabic": "plus",
881
+ "voice": "plus",
882
+ "nasal": "minus",
883
+ "retroflex": "minus",
884
+ "lateral": "minus",
885
+ "high": "mid",
886
+ "back": "front",
887
+ "round": "plus",
888
+ "long": "minus",
889
+ "aspirated": "minus",
890
+ },
891
+ "æ": {
892
+ "place": "vowel",
893
+ "manner": "vowel2",
894
+ "syllabic": "plus",
895
+ "voice": "plus",
896
+ "nasal": "minus",
897
+ "retroflex": "minus",
898
+ "lateral": "minus",
899
+ "high": "low",
900
+ "back": "front",
901
+ "round": "minus",
902
+ "long": "minus",
903
+ "aspirated": "minus",
904
+ },
905
+ "a": {
906
+ "place": "vowel",
907
+ "manner": "vowel2",
908
+ "syllabic": "plus",
909
+ "voice": "plus",
910
+ "nasal": "minus",
911
+ "retroflex": "minus",
912
+ "lateral": "minus",
913
+ "high": "low",
914
+ "back": "front",
915
+ "round": "minus",
916
+ "long": "minus",
917
+ "aspirated": "minus",
918
+ },
919
+ "A": {
920
+ "place": "vowel",
921
+ "manner": "vowel2",
922
+ "syllabic": "plus",
923
+ "voice": "plus",
924
+ "nasal": "minus",
925
+ "retroflex": "minus",
926
+ "lateral": "minus",
927
+ "high": "low",
928
+ "back": "front",
929
+ "round": "minus",
930
+ "long": "plus",
931
+ "aspirated": "minus",
932
+ },
933
+ "ɨ": {
934
+ "place": "vowel",
935
+ "manner": "vowel2",
936
+ "syllabic": "plus",
937
+ "voice": "plus",
938
+ "nasal": "minus",
939
+ "retroflex": "minus",
940
+ "lateral": "minus",
941
+ "high": "high",
942
+ "back": "central",
943
+ "round": "minus",
944
+ "long": "minus",
945
+ "aspirated": "minus",
946
+ },
947
+ "ʉ": {
948
+ "place": "vowel",
949
+ "manner": "vowel2",
950
+ "syllabic": "plus",
951
+ "voice": "plus",
952
+ "nasal": "minus",
953
+ "retroflex": "minus",
954
+ "lateral": "minus",
955
+ "high": "high",
956
+ "back": "central",
957
+ "round": "plus",
958
+ "long": "minus",
959
+ "aspirated": "minus",
960
+ },
961
+ "ə": {
962
+ "place": "vowel",
963
+ "manner": "vowel2",
964
+ "syllabic": "plus",
965
+ "voice": "plus",
966
+ "nasal": "minus",
967
+ "retroflex": "minus",
968
+ "lateral": "minus",
969
+ "high": "mid",
970
+ "back": "central",
971
+ "round": "minus",
972
+ "long": "minus",
973
+ "aspirated": "minus",
974
+ },
975
+ "u": {
976
+ "place": "vowel",
977
+ "manner": "vowel2",
978
+ "syllabic": "plus",
979
+ "voice": "plus",
980
+ "nasal": "minus",
981
+ "retroflex": "minus",
982
+ "lateral": "minus",
983
+ "high": "high",
984
+ "back": "back",
985
+ "round": "plus",
986
+ "long": "minus",
987
+ "aspirated": "minus",
988
+ },
989
+ "U": {
990
+ "place": "vowel",
991
+ "manner": "vowel2",
992
+ "syllabic": "plus",
993
+ "voice": "plus",
994
+ "nasal": "minus",
995
+ "retroflex": "minus",
996
+ "lateral": "minus",
997
+ "high": "high",
998
+ "back": "back",
999
+ "round": "plus",
1000
+ "long": "plus",
1001
+ "aspirated": "minus",
1002
+ },
1003
+ "o": {
1004
+ "place": "vowel",
1005
+ "manner": "vowel2",
1006
+ "syllabic": "plus",
1007
+ "voice": "plus",
1008
+ "nasal": "minus",
1009
+ "retroflex": "minus",
1010
+ "lateral": "minus",
1011
+ "high": "mid",
1012
+ "back": "back",
1013
+ "round": "plus",
1014
+ "long": "minus",
1015
+ "aspirated": "minus",
1016
+ },
1017
+ "O": {
1018
+ "place": "vowel",
1019
+ "manner": "vowel2",
1020
+ "syllabic": "plus",
1021
+ "voice": "plus",
1022
+ "nasal": "minus",
1023
+ "retroflex": "minus",
1024
+ "lateral": "minus",
1025
+ "high": "mid",
1026
+ "back": "back",
1027
+ "round": "plus",
1028
+ "long": "plus",
1029
+ "aspirated": "minus",
1030
+ },
1031
+ "ɔ": {
1032
+ "place": "vowel",
1033
+ "manner": "vowel2",
1034
+ "syllabic": "plus",
1035
+ "voice": "plus",
1036
+ "nasal": "minus",
1037
+ "retroflex": "minus",
1038
+ "lateral": "minus",
1039
+ "high": "mid",
1040
+ "back": "back",
1041
+ "round": "plus",
1042
+ "long": "minus",
1043
+ "aspirated": "minus",
1044
+ },
1045
+ "ɒ": {
1046
+ "place": "vowel",
1047
+ "manner": "vowel2",
1048
+ "syllabic": "plus",
1049
+ "voice": "plus",
1050
+ "nasal": "minus",
1051
+ "retroflex": "minus",
1052
+ "lateral": "minus",
1053
+ "high": "low",
1054
+ "back": "back",
1055
+ "round": "minus",
1056
+ "long": "minus",
1057
+ "aspirated": "minus",
1058
+ },
1059
+ "I": {
1060
+ "place": "vowel",
1061
+ "manner": "vowel2",
1062
+ "syllabic": "plus",
1063
+ "voice": "plus",
1064
+ "nasal": "minus",
1065
+ "retroflex": "minus",
1066
+ "lateral": "minus",
1067
+ "high": "high",
1068
+ "back": "front",
1069
+ "round": "minus",
1070
+ "long": "plus",
1071
+ "aspirated": "minus",
1072
+ },
1073
+ }
1074
+
1075
+ # === Algorithm ===
1076
+
1077
+
1078
+ def align(str1, str2, epsilon=0):
1079
+ """
1080
+ Compute the alignment of two phonetic strings.
1081
+
1082
+ :param str str1: First string to be aligned
1083
+ :param str str2: Second string to be aligned
1084
+
1085
+ :type epsilon: float (0.0 to 1.0)
1086
+ :param epsilon: Adjusts threshold similarity score for near-optimal alignments
1087
+
1088
+ :rtype: list(list(tuple(str, str)))
1089
+ :return: Alignment(s) of str1 and str2
1090
+
1091
+ (Kondrak 2002: 51)
1092
+ """
1093
+ if np is None:
1094
+ raise ImportError("You need numpy in order to use the align function")
1095
+
1096
+ assert 0.0 <= epsilon <= 1.0, "Epsilon must be between 0.0 and 1.0."
1097
+ m = len(str1)
1098
+ n = len(str2)
1099
+ # This includes Kondrak's initialization of row 0 and column 0 to all 0s.
1100
+ S = np.zeros((m + 1, n + 1), dtype=float)
1101
+
1102
+ # If i <= 1 or j <= 1, don't allow expansions as it doesn't make sense,
1103
+ # and breaks array and string indices. Make sure they never get chosen
1104
+ # by setting them to -inf.
1105
+ for i in range(1, m + 1):
1106
+ for j in range(1, n + 1):
1107
+ edit1 = S[i - 1, j] + sigma_skip(str1[i - 1])
1108
+ edit2 = S[i, j - 1] + sigma_skip(str2[j - 1])
1109
+ edit3 = S[i - 1, j - 1] + sigma_sub(str1[i - 1], str2[j - 1])
1110
+ if i > 1:
1111
+ edit4 = S[i - 2, j - 1] + sigma_exp(str2[j - 1], str1[i - 2 : i])
1112
+ else:
1113
+ edit4 = -inf
1114
+ if j > 1:
1115
+ edit5 = S[i - 1, j - 2] + sigma_exp(str1[i - 1], str2[j - 2 : j])
1116
+ else:
1117
+ edit5 = -inf
1118
+ S[i, j] = max(edit1, edit2, edit3, edit4, edit5, 0)
1119
+
1120
+ T = (1 - epsilon) * np.amax(S) # Threshold score for near-optimal alignments
1121
+
1122
+ alignments = []
1123
+ for i in range(1, m + 1):
1124
+ for j in range(1, n + 1):
1125
+ if S[i, j] >= T:
1126
+ alignments.append(_retrieve(i, j, 0, S, T, str1, str2, []))
1127
+ return alignments
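Usage note: with the default epsilon=0 only alignments reaching the maximum score are returned, while a positive epsilon also keeps near-optimal alignments whose score is at least (1 - epsilon) times that maximum. A small sketch (numpy required, as checked above):

    best = align('kordis', 'hart')               # optimal alignment(s) only
    near = align('kordis', 'hart', epsilon=0.1)  # also keep scores within 10% of the optimum
    print(len(near) >= len(best))                # True: a lower threshold never drops alignments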
1128
+
1129
+
1130
+ def _retrieve(i, j, s, S, T, str1, str2, out):
1131
+ """
1132
+ Retrieve the path through the similarity matrix S starting at (i, j).
1133
+
1134
+ :rtype: list(tuple(str, str))
1135
+ :return: Alignment of str1 and str2
1136
+ """
1137
+ if S[i, j] == 0:
1138
+ return out
1139
+ else:
1140
+ if j > 1 and S[i - 1, j - 2] + sigma_exp(str1[i - 1], str2[j - 2 : j]) + s >= T:
1141
+ out.insert(0, (str1[i - 1], str2[j - 2 : j]))
1142
+ _retrieve(
1143
+ i - 1,
1144
+ j - 2,
1145
+ s + sigma_exp(str1[i - 1], str2[j - 2 : j]),
1146
+ S,
1147
+ T,
1148
+ str1,
1149
+ str2,
1150
+ out,
1151
+ )
1152
+ elif (
1153
+ i > 1 and S[i - 2, j - 1] + sigma_exp(str2[j - 1], str1[i - 2 : i]) + s >= T
1154
+ ):
1155
+ out.insert(0, (str1[i - 2 : i], str2[j - 1]))
1156
+ _retrieve(
1157
+ i - 2,
1158
+ j - 1,
1159
+ s + sigma_exp(str2[j - 1], str1[i - 2 : i]),
1160
+ S,
1161
+ T,
1162
+ str1,
1163
+ str2,
1164
+ out,
1165
+ )
1166
+ elif S[i, j - 1] + sigma_skip(str2[j - 1]) + s >= T:
1167
+ out.insert(0, ("-", str2[j - 1]))
1168
+ _retrieve(i, j - 1, s + sigma_skip(str2[j - 1]), S, T, str1, str2, out)
1169
+ elif S[i - 1, j] + sigma_skip(str1[i - 1]) + s >= T:
1170
+ out.insert(0, (str1[i - 1], "-"))
1171
+ _retrieve(i - 1, j, s + sigma_skip(str1[i - 1]), S, T, str1, str2, out)
1172
+ elif S[i - 1, j - 1] + sigma_sub(str1[i - 1], str2[j - 1]) + s >= T:
1173
+ out.insert(0, (str1[i - 1], str2[j - 1]))
1174
+ _retrieve(
1175
+ i - 1,
1176
+ j - 1,
1177
+ s + sigma_sub(str1[i - 1], str2[j - 1]),
1178
+ S,
1179
+ T,
1180
+ str1,
1181
+ str2,
1182
+ out,
1183
+ )
1184
+ return out
1185
+
1186
+
1187
+ def sigma_skip(p):
1188
+ """
1189
+ Returns score of an indel of P.
1190
+
1191
+ (Kondrak 2002: 54)
1192
+ """
1193
+ return C_skip
1194
+
1195
+
1196
+ def sigma_sub(p, q):
1197
+ """
1198
+ Returns score of a substitution of P with Q.
1199
+
1200
+ (Kondrak 2002: 54)
1201
+ """
1202
+ return C_sub - delta(p, q) - V(p) - V(q)
1203
+
1204
+
1205
+ def sigma_exp(p, q):
1206
+ """
1207
+ Returns score of an expansion/compression.
1208
+
1209
+ (Kondrak 2002: 54)
1210
+ """
1211
+ q1 = q[0]
1212
+ q2 = q[1]
1213
+ return C_exp - delta(p, q1) - delta(p, q2) - V(p) - max(V(q1), V(q2))
1214
+
1215
+
1216
+ def delta(p, q):
1217
+ """
1218
+ Return weighted sum of difference between P and Q.
1219
+
1220
+ (Kondrak 2002: 54)
1221
+ """
1222
+ features = R(p, q)
1223
+ total = 0
1224
+ for f in features:
1225
+ total += diff(p, q, f) * salience[f]
1226
+ return total
1227
+
1228
+
1229
+ def diff(p, q, f):
1230
+ """
1231
+ Returns difference between phonetic segments P and Q for feature F.
1232
+
1233
+ (Kondrak 2002: 52, 54)
1234
+ """
1235
+ p_features, q_features = feature_matrix[p], feature_matrix[q]
1236
+ return abs(similarity_matrix[p_features[f]] - similarity_matrix[q_features[f]])
1237
+
1238
+
1239
+ def R(p, q):
1240
+ """
1241
+ Return relevant features for segment comparison.
1242
+
1243
+ (Kondrak 2002: 54)
1244
+ """
1245
+ if p in consonants or q in consonants:
1246
+ return R_c
1247
+ return R_v
1248
+
1249
+
1250
+ def V(p):
1251
+ """
1252
+ Return vowel weight if P is vowel.
1253
+
1254
+ (Kondrak 2002: 54)
1255
+ """
1256
+ if p in consonants:
1257
+ return 0
1258
+ return C_vwl
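To make the scoring functions above concrete, a small worked example against the feature and salience tables: 'p' and 'b' share every relevant consonant feature except voice, so delta('p', 'b') = |1.0 - 0.0| * salience['voice'] = 5; both are consonants, so V() contributes nothing, and sigma_sub('p', 'b') = C_sub - 5 = 30.

    print(delta('p', 'b'))      # 5.0  (voice is the only differing feature)
    print(sigma_sub('p', 'b'))  # 30.0 (35 - 5 - 0 - 0)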
1259
+
1260
+
1261
+ # === Test ===
1262
+
1263
+
1264
+ def demo():
1265
+ """
1266
+ A demonstration of the result of aligning phonetic sequences
1267
+ used in Kondrak's (2002) dissertation.
1268
+ """
1269
+ data = [pair.split(",") for pair in cognate_data.split("\n")]
1270
+ for pair in data:
1271
+ alignment = align(pair[0], pair[1])[0]
1272
+ alignment = [f"({a[0]}, {a[1]})" for a in alignment]
1273
+ alignment = " ".join(alignment)
1274
+ print(f"{pair[0]} ~ {pair[1]} : {alignment}")
1275
+
1276
+
1277
+ cognate_data = """jo,ʒə
1278
+ tu,ty
1279
+ nosotros,nu
1280
+ kjen,ki
1281
+ ke,kwa
1282
+ todos,tu
1283
+ una,ən
1284
+ dos,dø
1285
+ tres,trwa
1286
+ ombre,om
1287
+ arbol,arbrə
1288
+ pluma,plym
1289
+ kabeθa,kap
1290
+ boka,buʃ
1291
+ pje,pje
1292
+ koraθon,kœr
1293
+ ber,vwar
1294
+ benir,vənir
1295
+ deθir,dir
1296
+ pobre,povrə
1297
+ ðis,dIzes
1298
+ ðæt,das
1299
+ wat,vas
1300
+ nat,nixt
1301
+ loŋ,laŋ
1302
+ mæn,man
1303
+ fleʃ,flajʃ
1304
+ bləd,blyt
1305
+ feðər,fEdər
1306
+ hær,hAr
1307
+ ir,Or
1308
+ aj,awgə
1309
+ nowz,nAzə
1310
+ mawθ,munt
1311
+ təŋ,tsuŋə
1312
+ fut,fys
1313
+ nij,knI
1314
+ hænd,hant
1315
+ hart,herts
1316
+ livər,lEbər
1317
+ ænd,ante
1318
+ æt,ad
1319
+ blow,flAre
1320
+ ir,awris
1321
+ ijt,edere
1322
+ fiʃ,piʃkis
1323
+ flow,fluere
1324
+ staɾ,stella
1325
+ ful,plenus
1326
+ græs,gramen
1327
+ hart,kordis
1328
+ horn,korny
1329
+ aj,ego
1330
+ nij,genU
1331
+ məðər,mAter
1332
+ mawntən,mons
1333
+ nejm,nomen
1334
+ njuw,nowus
1335
+ wən,unus
1336
+ rawnd,rotundus
1337
+ sow,suere
1338
+ sit,sedere
1339
+ θrij,tres
1340
+ tuwθ,dentis
1341
+ θin,tenwis
1342
+ kinwawa,kenuaʔ
1343
+ nina,nenah
1344
+ napewa,napɛw
1345
+ wapimini,wapemen
1346
+ namesa,namɛʔs
1347
+ okimawa,okemaw
1348
+ ʃiʃipa,seʔsep
1349
+ ahkohkwa,ahkɛh
1350
+ pematesiweni,pematesewen
1351
+ asenja,aʔsɛn"""
1352
+
1353
+ if __name__ == "__main__":
1354
+ demo()
llmeval-env/lib/python3.10/site-packages/nltk/metrics/association.py ADDED
@@ -0,0 +1,476 @@
1
+ # Natural Language Toolkit: Ngram Association Measures
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Joel Nothman <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Provides scoring functions for a number of association measures through a
10
+ generic, abstract implementation in ``NgramAssocMeasures``, and n-specific
11
+ ``BigramAssocMeasures`` and ``TrigramAssocMeasures``.
12
+ """
13
+
14
+ import math as _math
15
+ from abc import ABCMeta, abstractmethod
16
+ from functools import reduce
17
+
18
+ _log2 = lambda x: _math.log2(x)
19
+ _ln = _math.log
20
+
21
+ _product = lambda s: reduce(lambda x, y: x * y, s)
22
+
23
+ _SMALL = 1e-20
24
+
25
+ try:
26
+ from scipy.stats import fisher_exact
27
+ except ImportError:
28
+
29
+ def fisher_exact(*_args, **_kwargs):
30
+ raise NotImplementedError
31
+
32
+
33
+ ### Indices to marginals arguments:
34
+
35
+ NGRAM = 0
36
+ """Marginals index for the ngram count"""
37
+
38
+ UNIGRAMS = -2
39
+ """Marginals index for a tuple of each unigram count"""
40
+
41
+ TOTAL = -1
42
+ """Marginals index for the number of words in the data"""
43
+
44
+
45
+ class NgramAssocMeasures(metaclass=ABCMeta):
46
+ """
47
+ An abstract class defining a collection of generic association measures.
48
+ Each public method returns a score, taking the following arguments::
49
+
50
+ score_fn(count_of_ngram,
51
+ (count_of_n-1gram_1, ..., count_of_n-1gram_j),
52
+ (count_of_n-2gram_1, ..., count_of_n-2gram_k),
53
+ ...,
54
+ (count_of_1gram_1, ..., count_of_1gram_n),
55
+ count_of_total_words)
56
+
57
+ See ``BigramAssocMeasures`` and ``TrigramAssocMeasures``
58
+
59
+ Inheriting classes should define a property _n, and a method _contingency
60
+ which calculates contingency values from marginals in order for all
61
+ association measures defined here to be usable.
62
+ """
63
+
64
+ _n = 0
65
+
66
+ @staticmethod
67
+ @abstractmethod
68
+ def _contingency(*marginals):
69
+ """Calculates values of a contingency table from marginal values."""
70
+ raise NotImplementedError(
71
+ "The contingency table is not available " "in the general ngram case"
72
+ )
73
+
74
+ @staticmethod
75
+ @abstractmethod
76
+ def _marginals(*contingency):
77
+ """Calculates values of contingency table marginals from its values."""
78
+ raise NotImplementedError(
79
+ "The contingency table is not available " "in the general ngram case"
80
+ )
81
+
82
+ @classmethod
83
+ def _expected_values(cls, cont):
84
+ """Calculates expected values for a contingency table."""
85
+ n_all = sum(cont)
86
+ bits = [1 << i for i in range(cls._n)]
87
+
88
+ # For each contingency table cell
89
+ for i in range(len(cont)):
90
+ # Yield the expected value
91
+ yield (
92
+ _product(
93
+ sum(cont[x] for x in range(2**cls._n) if (x & j) == (i & j))
94
+ for j in bits
95
+ )
96
+ / (n_all ** (cls._n - 1))
97
+ )
98
+
99
+ @staticmethod
100
+ def raw_freq(*marginals):
101
+ """Scores ngrams by their frequency"""
102
+ return marginals[NGRAM] / marginals[TOTAL]
103
+
104
+ @classmethod
105
+ def student_t(cls, *marginals):
106
+ """Scores ngrams using Student's t test with independence hypothesis
107
+ for unigrams, as in Manning and Schutze 5.3.1.
108
+ """
109
+ return (
110
+ marginals[NGRAM]
111
+ - _product(marginals[UNIGRAMS]) / (marginals[TOTAL] ** (cls._n - 1))
112
+ ) / (marginals[NGRAM] + _SMALL) ** 0.5
113
+
114
+ @classmethod
115
+ def chi_sq(cls, *marginals):
116
+ """Scores ngrams using Pearson's chi-square as in Manning and Schutze
117
+ 5.3.3.
118
+ """
119
+ cont = cls._contingency(*marginals)
120
+ exps = cls._expected_values(cont)
121
+ return sum((obs - exp) ** 2 / (exp + _SMALL) for obs, exp in zip(cont, exps))
122
+
123
+ @staticmethod
124
+ def mi_like(*marginals, **kwargs):
125
+ """Scores ngrams using a variant of mutual information. The keyword
126
+ argument power sets an exponent (default 3) for the numerator. No
127
+ logarithm of the result is calculated.
128
+ """
129
+ return marginals[NGRAM] ** kwargs.get("power", 3) / _product(
130
+ marginals[UNIGRAMS]
131
+ )
132
+
133
+ @classmethod
134
+ def pmi(cls, *marginals):
135
+ """Scores ngrams by pointwise mutual information, as in Manning and
136
+ Schutze 5.4.
137
+ """
138
+ return _log2(marginals[NGRAM] * marginals[TOTAL] ** (cls._n - 1)) - _log2(
139
+ _product(marginals[UNIGRAMS])
140
+ )
141
+
142
+ @classmethod
143
+ def likelihood_ratio(cls, *marginals):
144
+ """Scores ngrams using likelihood ratios as in Manning and Schutze 5.3.4."""
145
+ cont = cls._contingency(*marginals)
146
+ return 2 * sum(
147
+ obs * _ln(obs / (exp + _SMALL) + _SMALL)
148
+ for obs, exp in zip(cont, cls._expected_values(cont))
149
+ )
150
+
151
+ @classmethod
152
+ def poisson_stirling(cls, *marginals):
153
+ """Scores ngrams using the Poisson-Stirling measure."""
154
+ exp = _product(marginals[UNIGRAMS]) / (marginals[TOTAL] ** (cls._n - 1))
155
+ return marginals[NGRAM] * (_log2(marginals[NGRAM] / exp) - 1)
156
+
157
+ @classmethod
158
+ def jaccard(cls, *marginals):
159
+ """Scores ngrams using the Jaccard index."""
160
+ cont = cls._contingency(*marginals)
161
+ return cont[0] / sum(cont[:-1])
162
+
163
+
164
+ class BigramAssocMeasures(NgramAssocMeasures):
165
+ """
166
+ A collection of bigram association measures. Each association measure
167
+ is provided as a function with three arguments::
168
+
169
+ bigram_score_fn(n_ii, (n_ix, n_xi), n_xx)
170
+
171
+ The arguments constitute the marginals of a contingency table, counting
172
+ the occurrences of particular events in a corpus. The letter i in the
173
+ suffix refers to the appearance of the word in question, while x indicates
174
+ the appearance of any word. Thus, for example:
175
+
176
+ - n_ii counts ``(w1, w2)``, i.e. the bigram being scored
177
+ - n_ix counts ``(w1, *)``
178
+ - n_xi counts ``(*, w2)``
179
+ - n_xx counts ``(*, *)``, i.e. any bigram
180
+
181
+ This may be shown with respect to a contingency table::
182
+
183
+ w1 ~w1
184
+ ------ ------
185
+ w2 | n_ii | n_oi | = n_xi
186
+ ------ ------
187
+ ~w2 | n_io | n_oo |
188
+ ------ ------
189
+ = n_ix TOTAL = n_xx
190
+ """
191
+
192
+ _n = 2
193
+
194
+ @staticmethod
195
+ def _contingency(n_ii, n_ix_xi_tuple, n_xx):
196
+ """Calculates values of a bigram contingency table from marginal values."""
197
+ (n_ix, n_xi) = n_ix_xi_tuple
198
+ n_oi = n_xi - n_ii
199
+ n_io = n_ix - n_ii
200
+ return (n_ii, n_oi, n_io, n_xx - n_ii - n_oi - n_io)
201
+
202
+ @staticmethod
203
+ def _marginals(n_ii, n_oi, n_io, n_oo):
204
+ """Calculates values of contingency table marginals from its values."""
205
+ return (n_ii, (n_oi + n_ii, n_io + n_ii), n_oo + n_oi + n_io + n_ii)
206
+
207
+ @staticmethod
208
+ def _expected_values(cont):
209
+ """Calculates expected values for a contingency table."""
210
+ n_xx = sum(cont)
211
+ # For each contingency table cell
212
+ for i in range(4):
213
+ yield (cont[i] + cont[i ^ 1]) * (cont[i] + cont[i ^ 2]) / n_xx
214
+
215
+ @classmethod
216
+ def phi_sq(cls, *marginals):
217
+ """Scores bigrams using phi-square, the square of the Pearson correlation
218
+ coefficient.
219
+ """
220
+ n_ii, n_io, n_oi, n_oo = cls._contingency(*marginals)
221
+
222
+ return (n_ii * n_oo - n_io * n_oi) ** 2 / (
223
+ (n_ii + n_io) * (n_ii + n_oi) * (n_io + n_oo) * (n_oi + n_oo)
224
+ )
225
+
226
+ @classmethod
227
+ def chi_sq(cls, n_ii, n_ix_xi_tuple, n_xx):
228
+ """Scores bigrams using chi-square, i.e. phi-sq multiplied by the number
229
+ of bigrams, as in Manning and Schutze 5.3.3.
230
+ """
231
+ (n_ix, n_xi) = n_ix_xi_tuple
232
+ return n_xx * cls.phi_sq(n_ii, (n_ix, n_xi), n_xx)
233
+
234
+ @classmethod
235
+ def fisher(cls, *marginals):
236
+ """Scores bigrams using Fisher's Exact Test (Pedersen 1996). Less
237
+ sensitive to small counts than PMI or Chi Sq, but also more expensive
238
+ to compute. Requires scipy.
239
+ """
240
+
241
+ n_ii, n_io, n_oi, n_oo = cls._contingency(*marginals)
242
+
243
+ (odds, pvalue) = fisher_exact([[n_ii, n_io], [n_oi, n_oo]], alternative="less")
244
+ return pvalue
245
+
246
+ @staticmethod
247
+ def dice(n_ii, n_ix_xi_tuple, n_xx):
248
+ """Scores bigrams using Dice's coefficient."""
249
+ (n_ix, n_xi) = n_ix_xi_tuple
250
+ return 2 * n_ii / (n_ix + n_xi)
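To illustrate the marginals convention with made-up counts: suppose a bigram occurs 10 times, its first word 20 times and its second word 30 times in a corpus of 10000 bigrams. Then, for example:

    from nltk.metrics.association import BigramAssocMeasures

    print(BigramAssocMeasures.raw_freq(10, (20, 30), 10000))  # 0.001
    print(BigramAssocMeasures.pmi(10, (20, 30), 10000))       # log2(10*10000 / (20*30)), about 7.38
    print(BigramAssocMeasures.dice(10, (20, 30), 10000))      # 2*10 / (20+30) = 0.4

In practice these score functions are usually handed to a collocation finder (e.g. nltk.collocations.BigramCollocationFinder) rather than called with hand-counted marginals.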
251
+
252
+
253
+ class TrigramAssocMeasures(NgramAssocMeasures):
254
+ """
255
+ A collection of trigram association measures. Each association measure
256
+ is provided as a function with four arguments::
257
+
258
+ trigram_score_fn(n_iii,
259
+ (n_iix, n_ixi, n_xii),
260
+ (n_ixx, n_xix, n_xxi),
261
+ n_xxx)
262
+
263
+ The arguments constitute the marginals of a contingency table, counting
264
+ the occurrences of particular events in a corpus. The letter i in the
265
+ suffix refers to the appearance of the word in question, while x indicates
266
+ the appearance of any word. Thus, for example:
267
+
268
+ - n_iii counts ``(w1, w2, w3)``, i.e. the trigram being scored
269
+ - n_ixx counts ``(w1, *, *)``
270
+ - n_xxx counts ``(*, *, *)``, i.e. any trigram
271
+ """
272
+
273
+ _n = 3
274
+
275
+ @staticmethod
276
+ def _contingency(n_iii, n_iix_tuple, n_ixx_tuple, n_xxx):
277
+ """Calculates values of a trigram contingency table (or cube) from
278
+ marginal values.
279
+ >>> TrigramAssocMeasures._contingency(1, (1, 1, 1), (1, 73, 1), 2000)
280
+ (1, 0, 0, 0, 0, 72, 0, 1927)
281
+ """
282
+ (n_iix, n_ixi, n_xii) = n_iix_tuple
283
+ (n_ixx, n_xix, n_xxi) = n_ixx_tuple
284
+ n_oii = n_xii - n_iii
285
+ n_ioi = n_ixi - n_iii
286
+ n_iio = n_iix - n_iii
287
+ n_ooi = n_xxi - n_iii - n_oii - n_ioi
288
+ n_oio = n_xix - n_iii - n_oii - n_iio
289
+ n_ioo = n_ixx - n_iii - n_ioi - n_iio
290
+ n_ooo = n_xxx - n_iii - n_oii - n_ioi - n_iio - n_ooi - n_oio - n_ioo
291
+
292
+ return (n_iii, n_oii, n_ioi, n_ooi, n_iio, n_oio, n_ioo, n_ooo)
293
+
294
+ @staticmethod
295
+ def _marginals(*contingency):
296
+ """Calculates values of contingency table marginals from its values.
297
+ >>> TrigramAssocMeasures._marginals(1, 0, 0, 0, 0, 72, 0, 1927)
298
+ (1, (1, 1, 1), (1, 73, 1), 2000)
299
+ """
300
+ n_iii, n_oii, n_ioi, n_ooi, n_iio, n_oio, n_ioo, n_ooo = contingency
301
+ return (
302
+ n_iii,
303
+ (n_iii + n_iio, n_iii + n_ioi, n_iii + n_oii),
304
+ (
305
+ n_iii + n_ioi + n_iio + n_ioo,
306
+ n_iii + n_oii + n_iio + n_oio,
307
+ n_iii + n_oii + n_ioi + n_ooi,
308
+ ),
309
+ sum(contingency),
310
+ )
311
+
312
+
313
+ class QuadgramAssocMeasures(NgramAssocMeasures):
314
+ """
315
+ A collection of quadgram association measures. Each association measure
316
+ is provided as a function with five arguments::
317
+
318
+ quadgram_score_fn(n_iiii,
319
+ (n_iiix, n_iixi, n_ixii, n_xiii),
320
+ (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix),
321
+ (n_ixxx, n_xixx, n_xxix, n_xxxi),
322
+ n_all)
323
+
324
+ The arguments constitute the marginals of a contingency table, counting
325
+ the occurrences of particular events in a corpus. The letter i in the
326
+ suffix refers to the appearance of the word in question, while x indicates
327
+ the appearance of any word. Thus, for example:
328
+
329
+ - n_iiii counts ``(w1, w2, w3, w4)``, i.e. the quadgram being scored
330
+ - n_ixxi counts ``(w1, *, *, w4)``
331
+ - n_xxxx counts ``(*, *, *, *)``, i.e. any quadgram
332
+ """
333
+
334
+ _n = 4
335
+
336
+ @staticmethod
337
+ def _contingency(n_iiii, n_iiix_tuple, n_iixx_tuple, n_ixxx_tuple, n_xxxx):
338
+ """Calculates values of a quadgram contingency table from
339
+ marginal values.
340
+ """
341
+ (n_iiix, n_iixi, n_ixii, n_xiii) = n_iiix_tuple
342
+ (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix) = n_iixx_tuple
343
+ (n_ixxx, n_xixx, n_xxix, n_xxxi) = n_ixxx_tuple
344
+ n_oiii = n_xiii - n_iiii
345
+ n_ioii = n_ixii - n_iiii
346
+ n_iioi = n_iixi - n_iiii
347
+ n_ooii = n_xxii - n_iiii - n_oiii - n_ioii
348
+ n_oioi = n_xixi - n_iiii - n_oiii - n_iioi
349
+ n_iooi = n_ixxi - n_iiii - n_ioii - n_iioi
350
+ n_oooi = n_xxxi - n_iiii - n_oiii - n_ioii - n_iioi - n_ooii - n_iooi - n_oioi
351
+ n_iiio = n_iiix - n_iiii
352
+ n_oiio = n_xiix - n_iiii - n_oiii - n_iiio
353
+ n_ioio = n_ixix - n_iiii - n_ioii - n_iiio
354
+ n_ooio = n_xxix - n_iiii - n_oiii - n_ioii - n_iiio - n_ooii - n_ioio - n_oiio
355
+ n_iioo = n_iixx - n_iiii - n_iioi - n_iiio
356
+ n_oioo = n_xixx - n_iiii - n_oiii - n_iioi - n_iiio - n_oioi - n_oiio - n_iioo
357
+ n_iooo = n_ixxx - n_iiii - n_ioii - n_iioi - n_iiio - n_iooi - n_iioo - n_ioio
358
+ n_oooo = (
359
+ n_xxxx
360
+ - n_iiii
361
+ - n_oiii
362
+ - n_ioii
363
+ - n_iioi
364
+ - n_ooii
365
+ - n_oioi
366
+ - n_iooi
367
+ - n_oooi
368
+ - n_iiio
369
+ - n_oiio
370
+ - n_ioio
371
+ - n_ooio
372
+ - n_iioo
373
+ - n_oioo
374
+ - n_iooo
375
+ )
376
+
377
+ return (
378
+ n_iiii,
379
+ n_oiii,
380
+ n_ioii,
381
+ n_ooii,
382
+ n_iioi,
383
+ n_oioi,
384
+ n_iooi,
385
+ n_oooi,
386
+ n_iiio,
387
+ n_oiio,
388
+ n_ioio,
389
+ n_ooio,
390
+ n_iioo,
391
+ n_oioo,
392
+ n_iooo,
393
+ n_oooo,
394
+ )
395
+
396
+ @staticmethod
397
+ def _marginals(*contingency):
398
+ """Calculates values of contingency table marginals from its values.
399
+ >>> QuadgramAssocMeasures._marginals(1, 0, 2, 46, 552, 825, 2577, 34967, 1, 0, 2, 48, 7250, 9031, 28585, 356653)
400
+ (1, (2, 553, 3, 1), (7804, 6, 3132, 1378, 49, 2), (38970, 17660, 100, 38970), 440540)
401
+ """
402
+ (
403
+ n_iiii,
404
+ n_oiii,
405
+ n_ioii,
406
+ n_ooii,
407
+ n_iioi,
408
+ n_oioi,
409
+ n_iooi,
410
+ n_oooi,
411
+ n_iiio,
412
+ n_oiio,
413
+ n_ioio,
414
+ n_ooio,
415
+ n_iioo,
416
+ n_oioo,
417
+ n_iooo,
418
+ n_oooo,
419
+ ) = contingency
420
+
421
+ n_iiix = n_iiii + n_iiio
422
+ n_iixi = n_iiii + n_iioi
423
+ n_ixii = n_iiii + n_ioii
424
+ n_xiii = n_iiii + n_oiii
425
+
426
+ n_iixx = n_iiii + n_iioi + n_iiio + n_iioo
427
+ n_ixix = n_iiii + n_ioii + n_iiio + n_ioio
428
+ n_ixxi = n_iiii + n_ioii + n_iioi + n_iooi
429
+ n_xixi = n_iiii + n_oiii + n_iioi + n_oioi
430
+ n_xxii = n_iiii + n_oiii + n_ioii + n_ooii
431
+ n_xiix = n_iiii + n_oiii + n_iiio + n_oiio
432
+
433
+ n_ixxx = n_iiii + n_ioii + n_iioi + n_iiio + n_iooi + n_iioo + n_ioio + n_iooo
434
+ n_xixx = n_iiii + n_oiii + n_iioi + n_iiio + n_oioi + n_oiio + n_iioo + n_oioo
435
+ n_xxix = n_iiii + n_oiii + n_ioii + n_iiio + n_ooii + n_ioio + n_oiio + n_ooio
436
+ n_xxxi = n_iiii + n_oiii + n_ioii + n_iioi + n_ooii + n_iooi + n_oioi + n_oooi
437
+
438
+ n_all = sum(contingency)
439
+
440
+ return (
441
+ n_iiii,
442
+ (n_iiix, n_iixi, n_ixii, n_xiii),
443
+ (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix),
444
+ (n_ixxx, n_xixx, n_xxix, n_xxxi),
445
+ n_all,
446
+ )
447
+
448
+
449
+ class ContingencyMeasures:
450
+ """Wraps NgramAssocMeasures classes such that the arguments of association
451
+ measures are contingency table values rather than marginals.
452
+ """
453
+
454
+ def __init__(self, measures):
455
+ """Constructs a ContingencyMeasures given a NgramAssocMeasures class"""
456
+ self.__class__.__name__ = "Contingency" + measures.__class__.__name__
457
+ for k in dir(measures):
458
+ if k.startswith("__"):
459
+ continue
460
+ v = getattr(measures, k)
461
+ if not k.startswith("_"):
462
+ v = self._make_contingency_fn(measures, v)
463
+ setattr(self, k, v)
464
+
465
+ @staticmethod
466
+ def _make_contingency_fn(measures, old_fn):
467
+ """From an association measure function, produces a new function which
468
+ accepts contingency table values as its arguments.
469
+ """
470
+
471
+ def res(*contingency):
472
+ return old_fn(*measures._marginals(*contingency))
473
+
474
+ res.__doc__ = old_fn.__doc__
475
+ res.__name__ = old_fn.__name__
476
+ return res
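A brief sketch of the wrapper in use: after wrapping, the same measures take the four contingency cells (n_ii, n_oi, n_io, n_oo) directly instead of marginals, so the bigram counted above becomes:

    from nltk.metrics.association import BigramAssocMeasures, ContingencyMeasures

    cont = ContingencyMeasures(BigramAssocMeasures())
    # n_ii=10, n_oi=20, n_io=10, n_oo=9960 corresponds to the marginals (10, (20, 30), 10000)
    print(cont.pmi(10, 20, 10, 9960))  # about 7.38, matching the marginals form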
llmeval-env/lib/python3.10/site-packages/nltk/metrics/confusionmatrix.py ADDED
@@ -0,0 +1,353 @@
1
+ # Natural Language Toolkit: Confusion Matrices
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # Tom Aarsen <>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ from nltk.probability import FreqDist
11
+
12
+
13
+ class ConfusionMatrix:
14
+ """
15
+ The confusion matrix between a list of reference values and a
16
+ corresponding list of test values. Entry *[r,t]* of this
17
+ matrix is a count of the number of times that the reference value
18
+ *r* corresponds to the test value *t*. E.g.:
19
+
20
+ >>> from nltk.metrics import ConfusionMatrix
21
+ >>> ref = 'DET NN VB DET JJ NN NN IN DET NN'.split()
22
+ >>> test = 'DET VB VB DET NN NN NN IN DET NN'.split()
23
+ >>> cm = ConfusionMatrix(ref, test)
24
+ >>> print(cm['NN', 'NN'])
25
+ 3
26
+
27
+ Note that the diagonal entries *Ri=Tj* of this matrix
28
+ correspond to correct values, and the off-diagonal entries
29
+ correspond to incorrect values.
30
+ """
31
+
32
+ def __init__(self, reference, test, sort_by_count=False):
33
+ """
34
+ Construct a new confusion matrix from a list of reference
35
+ values and a corresponding list of test values.
36
+
37
+ :type reference: list
38
+ :param reference: An ordered list of reference values.
39
+ :type test: list
40
+ :param test: A list of values to compare against the
41
+ corresponding reference values.
42
+ :raise ValueError: If ``reference`` and ``length`` do not have
43
+ the same length.
44
+ """
45
+ if len(reference) != len(test):
46
+ raise ValueError("Lists must have the same length.")
47
+
48
+ # Get a list of all values.
49
+ if sort_by_count:
50
+ ref_fdist = FreqDist(reference)
51
+ test_fdist = FreqDist(test)
52
+
53
+ def key(v):
54
+ return -(ref_fdist[v] + test_fdist[v])
55
+
56
+ values = sorted(set(reference + test), key=key)
57
+ else:
58
+ values = sorted(set(reference + test))
59
+
60
+ # Construct a value->index dictionary
61
+ indices = {val: i for (i, val) in enumerate(values)}
62
+
63
+ # Make a confusion matrix table.
64
+ confusion = [[0 for _ in values] for _ in values]
65
+ max_conf = 0 # Maximum confusion
66
+ for w, g in zip(reference, test):
67
+ confusion[indices[w]][indices[g]] += 1
68
+ max_conf = max(max_conf, confusion[indices[w]][indices[g]])
69
+
70
+ #: A list of all values in ``reference`` or ``test``.
71
+ self._values = values
72
+ #: A dictionary mapping values in ``self._values`` to their indices.
73
+ self._indices = indices
74
+ #: The confusion matrix itself (as a list of lists of counts).
75
+ self._confusion = confusion
76
+ #: The greatest count in ``self._confusion`` (used for printing).
77
+ self._max_conf = max_conf
78
+ #: The total number of values in the confusion matrix.
79
+ self._total = len(reference)
80
+ #: The number of correct (on-diagonal) values in the matrix.
81
+ self._correct = sum(confusion[i][i] for i in range(len(values)))
82
+
83
+ def __getitem__(self, li_lj_tuple):
84
+ """
85
+ :return: The number of times that value ``li`` was expected and
86
+ value ``lj`` was given.
87
+ :rtype: int
88
+ """
89
+ (li, lj) = li_lj_tuple
90
+ i = self._indices[li]
91
+ j = self._indices[lj]
92
+ return self._confusion[i][j]
93
+
94
+ def __repr__(self):
95
+ return f"<ConfusionMatrix: {self._correct}/{self._total} correct>"
96
+
97
+ def __str__(self):
98
+ return self.pretty_format()
99
+
100
+ def pretty_format(
101
+ self,
102
+ show_percents=False,
103
+ values_in_chart=True,
104
+ truncate=None,
105
+ sort_by_count=False,
106
+ ):
107
+ """
108
+ :return: A multi-line string representation of this confusion matrix.
109
+ :type truncate: int
110
+ :param truncate: If specified, then only show the specified
111
+ number of values. Any sorting (e.g., sort_by_count)
112
+ will be performed before truncation.
113
+ :param sort_by_count: If true, then sort by the count of each
114
+ label in the reference data. I.e., labels that occur more
115
+ frequently in the reference label will be towards the left
116
+ edge of the matrix, and labels that occur less frequently
117
+ will be towards the right edge.
118
+
119
+ @todo: add marginals?
120
+ """
121
+ confusion = self._confusion
122
+
123
+ values = self._values
124
+ if sort_by_count:
125
+ values = sorted(
126
+ values, key=lambda v: -sum(self._confusion[self._indices[v]])
127
+ )
128
+
129
+ if truncate:
130
+ values = values[:truncate]
131
+
132
+ if values_in_chart:
133
+ value_strings = ["%s" % val for val in values]
134
+ else:
135
+ value_strings = [str(n + 1) for n in range(len(values))]
136
+
137
+ # Construct a format string for row values
138
+ valuelen = max(len(val) for val in value_strings)
139
+ value_format = "%" + repr(valuelen) + "s | "
140
+ # Construct a format string for matrix entries
141
+ if show_percents:
142
+ entrylen = 6
143
+ entry_format = "%5.1f%%"
144
+ zerostr = " ."
145
+ else:
146
+ entrylen = len(repr(self._max_conf))
147
+ entry_format = "%" + repr(entrylen) + "d"
148
+ zerostr = " " * (entrylen - 1) + "."
149
+
150
+ # Write the column values.
151
+ s = ""
152
+ for i in range(valuelen):
153
+ s += (" " * valuelen) + " |"
154
+ for val in value_strings:
155
+ if i >= valuelen - len(val):
156
+ s += val[i - valuelen + len(val)].rjust(entrylen + 1)
157
+ else:
158
+ s += " " * (entrylen + 1)
159
+ s += " |\n"
160
+
161
+ # Write a dividing line
162
+ s += "{}-+-{}+\n".format("-" * valuelen, "-" * ((entrylen + 1) * len(values)))
163
+
164
+ # Write the entries.
165
+ for val, li in zip(value_strings, values):
166
+ i = self._indices[li]
167
+ s += value_format % val
168
+ for lj in values:
169
+ j = self._indices[lj]
170
+ if confusion[i][j] == 0:
171
+ s += zerostr
172
+ elif show_percents:
173
+ s += entry_format % (100.0 * confusion[i][j] / self._total)
174
+ else:
175
+ s += entry_format % confusion[i][j]
176
+ if i == j:
177
+ prevspace = s.rfind(" ")
178
+ s = s[:prevspace] + "<" + s[prevspace + 1 :] + ">"
179
+ else:
180
+ s += " "
181
+ s += "|\n"
182
+
183
+ # Write a dividing line
184
+ s += "{}-+-{}+\n".format("-" * valuelen, "-" * ((entrylen + 1) * len(values)))
185
+
186
+ # Write a key
187
+ s += "(row = reference; col = test)\n"
188
+ if not values_in_chart:
189
+ s += "Value key:\n"
190
+ for i, value in enumerate(values):
191
+ s += "%6d: %s\n" % (i + 1, value)
192
+
193
+ return s
194
+
195
+ def key(self):
196
+ values = self._values
197
+ str = "Value key:\n"
198
+ indexlen = len(repr(len(values) - 1))
199
+ key_format = " %" + repr(indexlen) + "d: %s\n"
200
+ for i in range(len(values)):
201
+ str += key_format % (i, values[i])
202
+
203
+ return str
204
+
205
+ def recall(self, value):
206
+ """Given a value in the confusion matrix, return the recall
207
+ that corresponds to this value. The recall is defined as:
208
+
209
+ - *r* = true positive / (true positive + false negative)
210
+
211
+ and can loosely be considered the ratio of how often ``value``
212
+ was predicted correctly relative to how often ``value`` was
213
+ the true result.
214
+
215
+ :param value: value used in the ConfusionMatrix
216
+ :return: the recall corresponding to ``value``.
217
+ :rtype: float
218
+ """
219
+ # Number of times `value` was correct, and also predicted
220
+ TP = self[value, value]
221
+ # Number of times `value` was correct
222
+ TP_FN = sum(self[value, pred_value] for pred_value in self._values)
223
+ if TP_FN == 0:
224
+ return 0.0
225
+ return TP / TP_FN
226
+
227
+ def precision(self, value):
228
+ """Given a value in the confusion matrix, return the precision
229
+ that corresponds to this value. The precision is defined as:
230
+
231
+ - *p* = true positive / (true positive + false positive)
232
+
233
+ and can loosely be considered the ratio of how often ``value``
234
+ was predicted correctly relative to the number of predictions
235
+ for ``value``.
236
+
237
+ :param value: value used in the ConfusionMatrix
238
+ :return: the precision corresponding to ``value``.
239
+ :rtype: float
240
+ """
241
+ # Number of times `value` was correct, and also predicted
242
+ TP = self[value, value]
243
+ # Number of times `value` was predicted
244
+ TP_FP = sum(self[real_value, value] for real_value in self._values)
245
+ if TP_FP == 0:
246
+ return 0.0
247
+ return TP / TP_FP
248
+
249
+ def f_measure(self, value, alpha=0.5):
250
+ """
251
+ Given a value used in the confusion matrix, return the f-measure
252
+ that corresponds to this value. The f-measure is the harmonic mean
253
+ of the ``precision`` and ``recall``, weighted by ``alpha``.
254
+ In particular, given the precision *p* and recall *r* defined by:
255
+
256
+ - *p* = true positive / (true positive + false positive)
257
+ - *r* = true positive / (true positive + false negative)
258
+
259
+ The f-measure is:
260
+
261
+ - *1/(alpha/p + (1-alpha)/r)*
262
+
263
+ With ``alpha = 0.5``, this reduces to:
264
+
265
+ - *2pr / (p + r)*
266
+
267
+ :param value: value used in the ConfusionMatrix
268
+ :param alpha: Ratio of the cost of false negative compared to false
269
+ positives. Defaults to 0.5, where the costs are equal.
270
+ :type alpha: float
271
+ :return: the F-measure corresponding to ``value``.
272
+ :rtype: float
273
+ """
274
+ p = self.precision(value)
275
+ r = self.recall(value)
276
+ if p == 0.0 or r == 0.0:
277
+ return 0.0
278
+ return 1.0 / (alpha / p + (1 - alpha) / r)
279
+
280
+ def evaluate(self, alpha=0.5, truncate=None, sort_by_count=False):
281
+ """
282
+ Tabulate the **recall**, **precision** and **f-measure**
283
+ for each value in this confusion matrix.
284
+
285
+ >>> reference = "DET NN VB DET JJ NN NN IN DET NN".split()
286
+ >>> test = "DET VB VB DET NN NN NN IN DET NN".split()
287
+ >>> cm = ConfusionMatrix(reference, test)
288
+ >>> print(cm.evaluate())
289
+ Tag | Prec. | Recall | F-measure
290
+ ----+--------+--------+-----------
291
+ DET | 1.0000 | 1.0000 | 1.0000
292
+ IN | 1.0000 | 1.0000 | 1.0000
293
+ JJ | 0.0000 | 0.0000 | 0.0000
294
+ NN | 0.7500 | 0.7500 | 0.7500
295
+ VB | 0.5000 | 1.0000 | 0.6667
296
+ <BLANKLINE>
297
+
298
+ :param alpha: Ratio of the cost of false negative compared to false
299
+ positives, as used in the f-measure computation. Defaults to 0.5,
300
+ where the costs are equal.
301
+ :type alpha: float
302
+ :param truncate: If specified, then only show the specified
303
+ number of values. Any sorting (e.g., sort_by_count)
304
+ will be performed before truncation. Defaults to None
305
+ :type truncate: int, optional
306
+ :param sort_by_count: Whether to sort the outputs on frequency
307
+ in the reference label. Defaults to False.
308
+ :type sort_by_count: bool, optional
309
+ :return: A tabulated recall, precision and f-measure string
310
+ :rtype: str
311
+ """
312
+ tags = self._values
313
+
314
+ # Apply keyword parameters
315
+ if sort_by_count:
316
+ tags = sorted(tags, key=lambda v: -sum(self._confusion[self._indices[v]]))
317
+ if truncate:
318
+ tags = tags[:truncate]
319
+
320
+ tag_column_len = max(max(len(tag) for tag in tags), 3)
321
+
322
+ # Construct the header
323
+ s = (
324
+ f"{' ' * (tag_column_len - 3)}Tag | Prec. | Recall | F-measure\n"
325
+ f"{'-' * tag_column_len}-+--------+--------+-----------\n"
326
+ )
327
+
328
+ # Construct the body
329
+ for tag in tags:
330
+ s += (
331
+ f"{tag:>{tag_column_len}} | "
332
+ f"{self.precision(tag):<6.4f} | "
333
+ f"{self.recall(tag):<6.4f} | "
334
+ f"{self.f_measure(tag, alpha=alpha):.4f}\n"
335
+ )
336
+
337
+ return s
338
+
339
+
340
+ def demo():
341
+ reference = "DET NN VB DET JJ NN NN IN DET NN".split()
342
+ test = "DET VB VB DET NN NN NN IN DET NN".split()
343
+ print("Reference =", reference)
344
+ print("Test =", test)
345
+ print("Confusion matrix:")
346
+ print(ConfusionMatrix(reference, test))
347
+ print(ConfusionMatrix(reference, test).pretty_format(sort_by_count=True))
348
+
349
+ print(ConfusionMatrix(reference, test).recall("VB"))
350
+
351
+
352
+ if __name__ == "__main__":
353
+ demo()
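As a quick usage sketch of the confusion-matrix API added in this file (the import path is inferred from the file location; the tag sequences are the toy example already used in the doctests above), the per-label scores can be pulled out like this:

    from nltk.metrics.confusionmatrix import ConfusionMatrix

    reference = "DET NN VB DET JJ NN NN IN DET NN".split()
    test = "DET VB VB DET NN NN NN IN DET NN".split()
    cm = ConfusionMatrix(reference, test)

    # Per-label scores for the "NN" tag
    print(cm.recall("NN"))      # 0.75: 3 of the 4 reference NN tokens are tagged NN
    print(cm.precision("NN"))   # 0.75: 3 of the 4 predicted NN tokens are correct
    print(cm.f_measure("NN"))   # 0.75
    # Full precision/recall/F table, most frequent tags first
    print(cm.evaluate(sort_by_count=True))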
llmeval-env/lib/python3.10/site-packages/nltk/metrics/distance.py ADDED
@@ -0,0 +1,508 @@
1
+ # Natural Language Toolkit: Distance Metrics
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # Tom Lippincott <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+ #
10
+
11
+ """
12
+ Distance Metrics.
13
+
14
+ Compute the distance between two items (usually strings).
15
+ As metrics, they must satisfy the following three requirements:
16
+
17
+ 1. d(a, a) = 0
18
+ 2. d(a, b) >= 0
19
+ 3. d(a, c) <= d(a, b) + d(b, c)
20
+ """
21
+
22
+ import operator
23
+ import warnings
24
+
25
+
26
+ def _edit_dist_init(len1, len2):
27
+ lev = []
28
+ for i in range(len1):
29
+ lev.append([0] * len2) # initialize 2D array to zero
30
+ for i in range(len1):
31
+ lev[i][0] = i # column 0: 0,1,2,3,4,...
32
+ for j in range(len2):
33
+ lev[0][j] = j # row 0: 0,1,2,3,4,...
34
+ return lev
35
+
36
+
37
+ def _last_left_t_init(sigma):
38
+ return {c: 0 for c in sigma}
39
+
40
+
41
+ def _edit_dist_step(
42
+ lev, i, j, s1, s2, last_left, last_right, substitution_cost=1, transpositions=False
43
+ ):
44
+ c1 = s1[i - 1]
45
+ c2 = s2[j - 1]
46
+
47
+ # skipping a character in s1
48
+ a = lev[i - 1][j] + 1
49
+ # skipping a character in s2
50
+ b = lev[i][j - 1] + 1
51
+ # substitution
52
+ c = lev[i - 1][j - 1] + (substitution_cost if c1 != c2 else 0)
53
+
54
+ # transposition
55
+ d = c + 1 # never picked by default
56
+ if transpositions and last_left > 0 and last_right > 0:
57
+ d = lev[last_left - 1][last_right - 1] + i - last_left + j - last_right - 1
58
+
59
+ # pick the cheapest
60
+ lev[i][j] = min(a, b, c, d)
61
+
62
+
63
+ def edit_distance(s1, s2, substitution_cost=1, transpositions=False):
64
+ """
65
+ Calculate the Levenshtein edit-distance between two strings.
66
+ The edit distance is the number of characters that need to be
67
+ substituted, inserted, or deleted, to transform s1 into s2. For
68
+ example, transforming "rain" to "shine" requires three steps,
69
+ consisting of two substitutions and one insertion:
70
+ "rain" -> "sain" -> "shin" -> "shine". These operations could have
71
+ been done in other orders, but at least three steps are needed.
72
+
73
+ Allows specifying the cost of substitution edits (e.g., "a" -> "b"),
74
+ because sometimes it makes sense to assign greater penalties to
75
+ substitutions.
76
+
77
+ This also optionally allows transposition edits (e.g., "ab" -> "ba"),
78
+ though this is disabled by default.
79
+
80
+ :param s1, s2: The strings to be analysed
81
+ :param transpositions: Whether to allow transposition edits
82
+ :type s1: str
83
+ :type s2: str
84
+ :type substitution_cost: int
85
+ :type transpositions: bool
86
+ :rtype: int
87
+ """
88
+ # set up a 2-D array
89
+ len1 = len(s1)
90
+ len2 = len(s2)
91
+ lev = _edit_dist_init(len1 + 1, len2 + 1)
92
+
93
+ # retrieve alphabet
94
+ sigma = set()
95
+ sigma.update(s1)
96
+ sigma.update(s2)
97
+
98
+ # set up table to remember positions of last seen occurrence in s1
99
+ last_left_t = _last_left_t_init(sigma)
100
+
101
+ # iterate over the array
102
+ # i and j start from 1 and not 0 to stay close to the wikipedia pseudo-code
103
+ # see https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance
104
+ for i in range(1, len1 + 1):
105
+ last_right_buf = 0
106
+ for j in range(1, len2 + 1):
107
+ last_left = last_left_t[s2[j - 1]]
108
+ last_right = last_right_buf
109
+ if s1[i - 1] == s2[j - 1]:
110
+ last_right_buf = j
111
+ _edit_dist_step(
112
+ lev,
113
+ i,
114
+ j,
115
+ s1,
116
+ s2,
117
+ last_left,
118
+ last_right,
119
+ substitution_cost=substitution_cost,
120
+ transpositions=transpositions,
121
+ )
122
+ last_left_t[s1[i - 1]] = i
123
+ return lev[len1][len2]
124
+
125
+
126
+ def _edit_dist_backtrace(lev):
127
+ i, j = len(lev) - 1, len(lev[0]) - 1
128
+ alignment = [(i, j)]
129
+
130
+ while (i, j) != (0, 0):
131
+ directions = [
132
+ (i - 1, j - 1), # substitution
133
+ (i - 1, j), # skip s1
134
+ (i, j - 1), # skip s2
135
+ ]
136
+
137
+ direction_costs = (
138
+ (lev[i][j] if (i >= 0 and j >= 0) else float("inf"), (i, j))
139
+ for i, j in directions
140
+ )
141
+ _, (i, j) = min(direction_costs, key=operator.itemgetter(0))
142
+
143
+ alignment.append((i, j))
144
+ return list(reversed(alignment))
145
+
146
+
147
+ def edit_distance_align(s1, s2, substitution_cost=1):
148
+ """
149
+ Calculate the minimum Levenshtein edit-distance based alignment
150
+ mapping between two strings. The alignment finds the mapping
151
+ from string s1 to s2 that minimizes the edit distance cost.
152
+ For example, mapping "rain" to "shine" would involve 2
153
+ substitutions, 2 matches and an insertion resulting in
154
+ the following mapping:
155
+ [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (4, 5)]
156
+ NB: (0, 0) is the start state without any letters associated
157
+ See more: https://web.stanford.edu/class/cs124/lec/med.pdf
158
+
159
+ In case of multiple valid minimum-distance alignments, the
160
+ backtrace has the following operation precedence:
161
+
162
+ 1. Substitute s1 and s2 characters
163
+ 2. Skip s1 character
164
+ 3. Skip s2 character
165
+
166
+ The backtrace is carried out in reverse string order.
167
+
168
+ This function does not support transposition.
169
+
170
+ :param s1, s2: The strings to be aligned
171
+ :type s1: str
172
+ :type s2: str
173
+ :type substitution_cost: int
174
+ :rtype: List[Tuple(int, int)]
175
+ """
176
+ # set up a 2-D array
177
+ len1 = len(s1)
178
+ len2 = len(s2)
179
+ lev = _edit_dist_init(len1 + 1, len2 + 1)
180
+
181
+ # iterate over the array
182
+ for i in range(len1):
183
+ for j in range(len2):
184
+ _edit_dist_step(
185
+ lev,
186
+ i + 1,
187
+ j + 1,
188
+ s1,
189
+ s2,
190
+ 0,
191
+ 0,
192
+ substitution_cost=substitution_cost,
193
+ transpositions=False,
194
+ )
195
+
196
+ # backtrace to find alignment
197
+ alignment = _edit_dist_backtrace(lev)
198
+ return alignment
199
+
200
+
201
+ def binary_distance(label1, label2):
202
+ """Simple equality test.
203
+
204
+ 0.0 if the labels are identical, 1.0 if they are different.
205
+
206
+ >>> from nltk.metrics import binary_distance
207
+ >>> binary_distance(1,1)
208
+ 0.0
209
+
210
+ >>> binary_distance(1,3)
211
+ 1.0
212
+ """
213
+
214
+ return 0.0 if label1 == label2 else 1.0
215
+
216
+
217
+ def jaccard_distance(label1, label2):
218
+ """Distance metric comparing set-similarity."""
219
+ return (len(label1.union(label2)) - len(label1.intersection(label2))) / len(
220
+ label1.union(label2)
221
+ )
222
+
223
+
224
+ def masi_distance(label1, label2):
225
+ """Distance metric that takes into account partial agreement when multiple
226
+ labels are assigned.
227
+
228
+ >>> from nltk.metrics import masi_distance
229
+ >>> masi_distance(set([1, 2]), set([1, 2, 3, 4]))
230
+ 0.665
231
+
232
+ Passonneau 2006, Measuring Agreement on Set-Valued Items (MASI)
233
+ for Semantic and Pragmatic Annotation.
234
+ """
235
+
236
+ len_intersection = len(label1.intersection(label2))
237
+ len_union = len(label1.union(label2))
238
+ len_label1 = len(label1)
239
+ len_label2 = len(label2)
240
+ if len_label1 == len_label2 and len_label1 == len_intersection:
241
+ m = 1
242
+ elif len_intersection == min(len_label1, len_label2):
243
+ m = 0.67
244
+ elif len_intersection > 0:
245
+ m = 0.33
246
+ else:
247
+ m = 0
248
+
249
+ return 1 - len_intersection / len_union * m
250
+
251
+
252
+ def interval_distance(label1, label2):
253
+ """Krippendorff's interval distance metric
254
+
255
+ >>> from nltk.metrics import interval_distance
256
+ >>> interval_distance(1,10)
257
+ 81
258
+
259
+ Krippendorff 1980, Content Analysis: An Introduction to its Methodology
260
+ """
261
+
262
+ try:
263
+ return pow(label1 - label2, 2)
264
+ # return pow(list(label1)[0]-list(label2)[0],2)
265
+ except:
266
+ print("non-numeric labels not supported with interval distance")
267
+
268
+
269
+ def presence(label):
270
+ """Higher-order function to test presence of a given label"""
271
+
272
+ return lambda x, y: 1.0 * ((label in x) == (label in y))
273
+
274
+
275
+ def fractional_presence(label):
276
+ return (
277
+ lambda x, y: abs((1.0 / len(x)) - (1.0 / len(y))) * (label in x and label in y)
278
+ or 0.0 * (label not in x and label not in y)
279
+ or abs(1.0 / len(x)) * (label in x and label not in y)
280
+ or (1.0 / len(y)) * (label not in x and label in y)
281
+ )
282
+
283
+
284
+ def custom_distance(file):
285
+ data = {}
286
+ with open(file) as infile:
287
+ for l in infile:
288
+ labelA, labelB, dist = l.strip().split("\t")
289
+ labelA = frozenset([labelA])
290
+ labelB = frozenset([labelB])
291
+ data[frozenset([labelA, labelB])] = float(dist)
292
+ return lambda x, y: data[frozenset([x, y])]
293
+
294
+
295
+ def jaro_similarity(s1, s2):
296
+ """
297
+ Computes the Jaro similarity between 2 sequences from:
298
+
299
+ Matthew A. Jaro (1989). Advances in record linkage methodology
300
+ as applied to the 1985 census of Tampa Florida. Journal of the
301
+ American Statistical Association. 84 (406): 414-20.
302
+
303
+ The Jaro distance between two strings is the min no. of single-character transpositions
304
+ required to change one word into another. The Jaro similarity formula from
305
+ https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance :
306
+
307
+ ``jaro_sim = 0 if m = 0 else 1/3 * (m/|s_1| + m/|s_2| + (m-t)/m)``
308
+
309
+ where
310
+ - `|s_i|` is the length of string `s_i`
311
+ - `m` is the no. of matching characters
312
+ - `t` is half the number of transpositions.
313
+ """
314
+ # First, store the length of the strings
315
+ # because they will be re-used several times.
316
+ len_s1, len_s2 = len(s1), len(s2)
317
+
318
+ # The upper bound of the distance for being a matched character.
319
+ match_bound = max(len_s1, len_s2) // 2 - 1
320
+
321
+ # Initialize the counts for matches and transpositions.
322
+ matches = 0 # no.of matched characters in s1 and s2
323
+ transpositions = 0 # no. of transpositions between s1 and s2
324
+ flagged_1 = [] # positions in s1 which are matches to some character in s2
325
+ flagged_2 = [] # positions in s2 which are matches to some character in s1
326
+
327
+ # Iterate through sequences, check for matches and compute transpositions.
328
+ for i in range(len_s1): # Iterate through each character.
329
+ upperbound = min(i + match_bound, len_s2 - 1)
330
+ lowerbound = max(0, i - match_bound)
331
+ for j in range(lowerbound, upperbound + 1):
332
+ if s1[i] == s2[j] and j not in flagged_2:
333
+ matches += 1
334
+ flagged_1.append(i)
335
+ flagged_2.append(j)
336
+ break
337
+ flagged_2.sort()
338
+ for i, j in zip(flagged_1, flagged_2):
339
+ if s1[i] != s2[j]:
340
+ transpositions += 1
341
+
342
+ if matches == 0:
343
+ return 0
344
+ else:
345
+ return (
346
+ 1
347
+ / 3
348
+ * (
349
+ matches / len_s1
350
+ + matches / len_s2
351
+ + (matches - transpositions // 2) / matches
352
+ )
353
+ )
354
+
355
+
356
+ def jaro_winkler_similarity(s1, s2, p=0.1, max_l=4):
357
+ """
358
+ The Jaro Winkler distance is an extension of the Jaro similarity in:
359
+
360
+ William E. Winkler. 1990. String Comparator Metrics and Enhanced
361
+ Decision Rules in the Fellegi-Sunter Model of Record Linkage.
362
+ Proceedings of the Section on Survey Research Methods.
363
+ American Statistical Association: 354-359.
364
+
365
+ such that:
366
+
367
+ jaro_winkler_sim = jaro_sim + ( l * p * (1 - jaro_sim) )
368
+
369
+ where,
370
+
371
+ - jaro_sim is the output from the Jaro Similarity,
372
+ see jaro_similarity()
373
+ - l is the length of common prefix at the start of the string
374
+ - this implementation provides an upperbound for the l value
375
+ to keep the prefixes. A common value of this upperbound is 4.
376
+ - p is the constant scaling factor to overweigh common prefixes.
377
+ The Jaro-Winkler similarity will fall within the [0, 1] bound,
378
+ given that max(p) <= 0.25; the default is p=0.1 in Winkler (1990)
379
+
380
+
381
+ Test using outputs from https://www.census.gov/srd/papers/pdf/rr93-8.pdf
382
+ from "Table 5 Comparison of String Comparators Rescaled between 0 and 1"
383
+
384
+ >>> winkler_examples = [("billy", "billy"), ("billy", "bill"), ("billy", "blily"),
385
+ ... ("massie", "massey"), ("yvette", "yevett"), ("billy", "bolly"), ("dwayne", "duane"),
386
+ ... ("dixon", "dickson"), ("billy", "susan")]
387
+
388
+ >>> winkler_scores = [1.000, 0.967, 0.947, 0.944, 0.911, 0.893, 0.858, 0.853, 0.000]
389
+ >>> jaro_scores = [1.000, 0.933, 0.933, 0.889, 0.889, 0.867, 0.822, 0.790, 0.000]
390
+
391
+ One way to match the values on the Winkler's paper is to provide a different
392
+ p scaling factor for different pairs of strings, e.g.
393
+
394
+ >>> p_factors = [0.1, 0.125, 0.20, 0.125, 0.20, 0.20, 0.20, 0.15, 0.1]
395
+
396
+ >>> for (s1, s2), jscore, wscore, p in zip(winkler_examples, jaro_scores, winkler_scores, p_factors):
397
+ ... assert round(jaro_similarity(s1, s2), 3) == jscore
398
+ ... assert round(jaro_winkler_similarity(s1, s2, p=p), 3) == wscore
399
+
400
+
401
+ Test using outputs from https://www.census.gov/srd/papers/pdf/rr94-5.pdf from
402
+ "Table 2.1. Comparison of String Comparators Using Last Names, First Names, and Street Names"
403
+
404
+ >>> winkler_examples = [('SHACKLEFORD', 'SHACKELFORD'), ('DUNNINGHAM', 'CUNNIGHAM'),
405
+ ... ('NICHLESON', 'NICHULSON'), ('JONES', 'JOHNSON'), ('MASSEY', 'MASSIE'),
406
+ ... ('ABROMS', 'ABRAMS'), ('HARDIN', 'MARTINEZ'), ('ITMAN', 'SMITH'),
407
+ ... ('JERALDINE', 'GERALDINE'), ('MARHTA', 'MARTHA'), ('MICHELLE', 'MICHAEL'),
408
+ ... ('JULIES', 'JULIUS'), ('TANYA', 'TONYA'), ('DWAYNE', 'DUANE'), ('SEAN', 'SUSAN'),
409
+ ... ('JON', 'JOHN'), ('JON', 'JAN'), ('BROOKHAVEN', 'BRROKHAVEN'),
410
+ ... ('BROOK HALLOW', 'BROOK HLLW'), ('DECATUR', 'DECATIR'), ('FITZRUREITER', 'FITZENREITER'),
411
+ ... ('HIGBEE', 'HIGHEE'), ('HIGBEE', 'HIGVEE'), ('LACURA', 'LOCURA'), ('IOWA', 'IONA'), ('1ST', 'IST')]
412
+
413
+ >>> jaro_scores = [0.970, 0.896, 0.926, 0.790, 0.889, 0.889, 0.722, 0.467, 0.926,
414
+ ... 0.944, 0.869, 0.889, 0.867, 0.822, 0.783, 0.917, 0.000, 0.933, 0.944, 0.905,
415
+ ... 0.856, 0.889, 0.889, 0.889, 0.833, 0.000]
416
+
417
+ >>> winkler_scores = [0.982, 0.896, 0.956, 0.832, 0.944, 0.922, 0.722, 0.467, 0.926,
418
+ ... 0.961, 0.921, 0.933, 0.880, 0.858, 0.805, 0.933, 0.000, 0.947, 0.967, 0.943,
419
+ ... 0.913, 0.922, 0.922, 0.900, 0.867, 0.000]
420
+
421
+ One way to match the values on the Winkler's paper is to provide a different
422
+ p scaling factor for different pairs of strings, e.g.
423
+
424
+ >>> p_factors = [0.1, 0.1, 0.1, 0.1, 0.125, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.20,
425
+ ... 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
426
+
427
+
428
+ >>> for (s1, s2), jscore, wscore, p in zip(winkler_examples, jaro_scores, winkler_scores, p_factors):
429
+ ... if (s1, s2) in [('JON', 'JAN'), ('1ST', 'IST')]:
430
+ ... continue # Skip bad examples from the paper.
431
+ ... assert round(jaro_similarity(s1, s2), 3) == jscore
432
+ ... assert round(jaro_winkler_similarity(s1, s2, p=p), 3) == wscore
433
+
434
+
435
+
436
+ This test-case proves that the output of Jaro-Winkler similarity depends on
437
+ the product l * p and not on the product max_l * p. Here the product max_l * p > 1,
438
+ but the product l * p <= 1.
439
+
440
+ >>> round(jaro_winkler_similarity('TANYA', 'TONYA', p=0.1, max_l=100), 3)
441
+ 0.88
442
+ """
443
+ # To ensure that the output of the Jaro-Winkler similarity
444
+ # falls between [0,1], the product of l * p also needs to
445
+ # fall between [0,1].
446
+ if not 0 <= max_l * p <= 1:
447
+ warnings.warn(
448
+ str(
449
+ "The product `max_l * p` might not fall between [0,1]."
450
+ "Jaro-Winkler similarity might not be between 0 and 1."
451
+ )
452
+ )
453
+
454
+ # Compute the Jaro similarity
455
+ jaro_sim = jaro_similarity(s1, s2)
456
+
457
+ # The common prefix length l is capped at the upper bound max_l
458
+ # (4 by default, following Winkler 1990); only the first max_l
459
+ # characters can contribute to the prefix bonus.
460
+
461
+ # Compute the prefix matches.
462
+ l = 0
463
+ # zip() will automatically loop until the end of shorter string.
464
+ for s1_i, s2_i in zip(s1, s2):
465
+ if s1_i == s2_i:
466
+ l += 1
467
+ else:
468
+ break
469
+ if l == max_l:
470
+ break
471
+ # Return the similarity value as described in docstring.
472
+ return jaro_sim + (l * p * (1 - jaro_sim))
473
+
474
+
475
+ def demo():
476
+ string_distance_examples = [
477
+ ("rain", "shine"),
478
+ ("abcdef", "acbdef"),
479
+ ("language", "lnaguaeg"),
480
+ ("language", "lnaugage"),
481
+ ("language", "lngauage"),
482
+ ]
483
+ for s1, s2 in string_distance_examples:
484
+ print(f"Edit distance btwn '{s1}' and '{s2}':", edit_distance(s1, s2))
485
+ print(
486
+ f"Edit dist with transpositions btwn '{s1}' and '{s2}':",
487
+ edit_distance(s1, s2, transpositions=True),
488
+ )
489
+ print(f"Jaro similarity btwn '{s1}' and '{s2}':", jaro_similarity(s1, s2))
490
+ print(
491
+ f"Jaro-Winkler similarity btwn '{s1}' and '{s2}':",
492
+ jaro_winkler_similarity(s1, s2),
493
+ )
494
+ print(
495
+ f"Jaro-Winkler distance btwn '{s1}' and '{s2}':",
496
+ 1 - jaro_winkler_similarity(s1, s2),
497
+ )
498
+ s1 = {1, 2, 3, 4}
499
+ s2 = {3, 4, 5}
500
+ print("s1:", s1)
501
+ print("s2:", s2)
502
+ print("Binary distance:", binary_distance(s1, s2))
503
+ print("Jaccard distance:", jaccard_distance(s1, s2))
504
+ print("MASI distance:", masi_distance(s1, s2))
505
+
506
+
507
+ if __name__ == "__main__":
508
+ demo()
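For reference, a minimal sketch of calling the distance functions defined in this file (import path inferred from the file location; the values in the comments follow the docstrings above):

    from nltk.metrics.distance import (
        edit_distance,
        edit_distance_align,
        jaccard_distance,
        jaro_winkler_similarity,
    )

    print(edit_distance("rain", "shine"))                          # 3: two substitutions plus one insertion
    print(edit_distance("abcdef", "acbdef", transpositions=True))  # 1: a single transposition
    print(edit_distance_align("rain", "shine"))                    # [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (4, 5)]
    print(jaccard_distance({1, 2, 3, 4}, {3, 4, 5}))               # 0.6 = (5 - 2) / 5
    print(round(jaro_winkler_similarity("TANYA", "TONYA"), 2))     # 0.88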
llmeval-env/lib/python3.10/site-packages/nltk/metrics/paice.py ADDED
@@ -0,0 +1,389 @@
1
+ # Natural Language Toolkit: Paice's Evaluation Metrics
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Lauri Hallila <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ #
8
+
9
+ """Counts Paice's performance statistics for evaluating stemming algorithms.
10
+
11
+ What is required:
12
+ - A dictionary of words grouped by their real lemmas
13
+ - A dictionary of words grouped by stems from a stemming algorithm
14
+
15
+ When these are given, Understemming Index (UI), Overstemming Index (OI),
16
+ Stemming Weight (SW) and Error-rate relative to truncation (ERRT) are counted.
17
+
18
+ References:
19
+ Chris D. Paice (1994). An evaluation method for stemming algorithms.
20
+ In Proceedings of SIGIR, 42--50.
21
+ """
22
+
23
+ from math import sqrt
24
+
25
+
26
+ def get_words_from_dictionary(lemmas):
27
+ """
28
+ Get original set of words used for analysis.
29
+
30
+ :param lemmas: A dictionary where keys are lemmas and values are sets
31
+ or lists of words corresponding to that lemma.
32
+ :type lemmas: dict(str): list(str)
33
+ :return: Set of words that exist as values in the dictionary
34
+ :rtype: set(str)
35
+ """
36
+ words = set()
37
+ for lemma in lemmas:
38
+ words.update(set(lemmas[lemma]))
39
+ return words
40
+
41
+
42
+ def _truncate(words, cutlength):
43
+ """Group words by stems defined by truncating them at given length.
44
+
45
+ :param words: Set of words used for analysis
46
+ :param cutlength: Words are stemmed by cutting at this length.
47
+ :type words: set(str) or list(str)
48
+ :type cutlength: int
49
+ :return: Dictionary where keys are stems and values are sets of words
50
+ corresponding to that stem.
51
+ :rtype: dict(str): set(str)
52
+ """
53
+ stems = {}
54
+ for word in words:
55
+ stem = word[:cutlength]
56
+ try:
57
+ stems[stem].update([word])
58
+ except KeyError:
59
+ stems[stem] = {word}
60
+ return stems
61
+
62
+
63
+ # Reference: https://en.wikipedia.org/wiki/Line-line_intersection
64
+ def _count_intersection(l1, l2):
65
+ """Count intersection between two line segments defined by coordinate pairs.
66
+
67
+ :param l1: Tuple of two coordinate pairs defining the first line segment
68
+ :param l2: Tuple of two coordinate pairs defining the second line segment
69
+ :type l1: tuple(float, float)
70
+ :type l2: tuple(float, float)
71
+ :return: Coordinates of the intersection
72
+ :rtype: tuple(float, float)
73
+ """
74
+ x1, y1 = l1[0]
75
+ x2, y2 = l1[1]
76
+ x3, y3 = l2[0]
77
+ x4, y4 = l2[1]
78
+
79
+ denominator = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
80
+
81
+ if denominator == 0.0: # lines are parallel
82
+ if x1 == x2 == x3 == x4 == 0.0:
83
+ # When lines are parallel, they must be on the y-axis.
84
+ # We can ignore x-axis because we stop counting the
85
+ # truncation line when we get there.
86
+ # There are no other options as UI (x-axis) grows and
87
+ # OI (y-axis) diminishes when we go along the truncation line.
88
+ return (0.0, y4)
89
+
90
+ x = (
91
+ (x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)
92
+ ) / denominator
93
+ y = (
94
+ (x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)
95
+ ) / denominator
96
+ return (x, y)
97
+
98
+
99
+ def _get_derivative(coordinates):
100
+ """Get derivative of the line from (0,0) to given coordinates.
101
+
102
+ :param coordinates: A coordinate pair
103
+ :type coordinates: tuple(float, float)
104
+ :return: Derivative; inf if x is zero
105
+ :rtype: float
106
+ """
107
+ try:
108
+ return coordinates[1] / coordinates[0]
109
+ except ZeroDivisionError:
110
+ return float("inf")
111
+
112
+
113
+ def _calculate_cut(lemmawords, stems):
114
+ """Count understemmed and overstemmed pairs for (lemma, stem) pair with common words.
115
+
116
+ :param lemmawords: Set or list of words corresponding to certain lemma.
117
+ :param stems: A dictionary where keys are stems and values are sets
118
+ or lists of words corresponding to that stem.
119
+ :type lemmawords: set(str) or list(str)
120
+ :type stems: dict(str): set(str)
121
+ :return: Amount of understemmed and overstemmed pairs contributed by words
122
+ existing in both lemmawords and stems.
123
+ :rtype: tuple(float, float)
124
+ """
125
+ umt, wmt = 0.0, 0.0
126
+ for stem in stems:
127
+ cut = set(lemmawords) & set(stems[stem])
128
+ if cut:
129
+ cutcount = len(cut)
130
+ stemcount = len(stems[stem])
131
+ # Unachieved merge total
132
+ umt += cutcount * (len(lemmawords) - cutcount)
133
+ # Wrongly merged total
134
+ wmt += cutcount * (stemcount - cutcount)
135
+ return (umt, wmt)
136
+
137
+
138
+ def _calculate(lemmas, stems):
139
+ """Calculate actual and maximum possible amounts of understemmed and overstemmed word pairs.
140
+
141
+ :param lemmas: A dictionary where keys are lemmas and values are sets
142
+ or lists of words corresponding to that lemma.
143
+ :param stems: A dictionary where keys are stems and values are sets
144
+ or lists of words corresponding to that stem.
145
+ :type lemmas: dict(str): list(str)
146
+ :type stems: dict(str): set(str)
147
+ :return: Global unachieved merge total (gumt),
148
+ global desired merge total (gdmt),
149
+ global wrongly merged total (gwmt) and
150
+ global desired non-merge total (gdnt).
151
+ :rtype: tuple(float, float, float, float)
152
+ """
153
+
154
+ n = sum(len(lemmas[word]) for word in lemmas)
155
+
156
+ gdmt, gdnt, gumt, gwmt = (0.0, 0.0, 0.0, 0.0)
157
+
158
+ for lemma in lemmas:
159
+ lemmacount = len(lemmas[lemma])
160
+
161
+ # Desired merge total
162
+ gdmt += lemmacount * (lemmacount - 1)
163
+
164
+ # Desired non-merge total
165
+ gdnt += lemmacount * (n - lemmacount)
166
+
167
+ # For each (lemma, stem) pair with common words, count how many
168
+ # pairs are understemmed and overstemmed.
169
+ umt, wmt = _calculate_cut(lemmas[lemma], stems)
170
+
171
+ # Add to total undesired and wrongly-merged totals
172
+ gumt += umt
173
+ gwmt += wmt
174
+
175
+ # Each object is counted twice, so divide by two
176
+ return (gumt / 2, gdmt / 2, gwmt / 2, gdnt / 2)
177
+
178
+
179
+ def _indexes(gumt, gdmt, gwmt, gdnt):
180
+ """Count Understemming Index (UI), Overstemming Index (OI) and Stemming Weight (SW).
181
+
182
+ :param gumt, gdmt, gwmt, gdnt: Global unachieved merge total (gumt),
183
+ global desired merge total (gdmt),
184
+ global wrongly merged total (gwmt) and
185
+ global desired non-merge total (gdnt).
186
+ :type gumt, gdmt, gwmt, gdnt: float
187
+ :return: Understemming Index (UI),
188
+ Overstemming Index (OI) and
189
+ Stemming Weight (SW).
190
+ :rtype: tuple(float, float, float)
191
+ """
192
+ # Calculate Understemming Index (UI),
193
+ # Overstemming Index (OI) and Stemming Weight (SW)
194
+ try:
195
+ ui = gumt / gdmt
196
+ except ZeroDivisionError:
197
+ # If GDMT (max merge total) is 0, define UI as 0
198
+ ui = 0.0
199
+ try:
200
+ oi = gwmt / gdnt
201
+ except ZeroDivisionError:
202
+ # IF GDNT (max non-merge total) is 0, define OI as 0
203
+ oi = 0.0
204
+ try:
205
+ sw = oi / ui
206
+ except ZeroDivisionError:
207
+ if oi == 0.0:
208
+ # OI and UI are 0, define SW as 'not a number'
209
+ sw = float("nan")
210
+ else:
211
+ # UI is 0, define SW as infinity
212
+ sw = float("inf")
213
+ return (ui, oi, sw)
214
+
215
+
216
+ class Paice:
217
+ """Class for storing lemmas, stems and evaluation metrics."""
218
+
219
+ def __init__(self, lemmas, stems):
220
+ """
221
+ :param lemmas: A dictionary where keys are lemmas and values are sets
222
+ or lists of words corresponding to that lemma.
223
+ :param stems: A dictionary where keys are stems and values are sets
224
+ or lists of words corresponding to that stem.
225
+ :type lemmas: dict(str): list(str)
226
+ :type stems: dict(str): set(str)
227
+ """
228
+ self.lemmas = lemmas
229
+ self.stems = stems
230
+ self.coords = []
231
+ self.gumt, self.gdmt, self.gwmt, self.gdnt = (None, None, None, None)
232
+ self.ui, self.oi, self.sw = (None, None, None)
233
+ self.errt = None
234
+ self.update()
235
+
236
+ def __str__(self):
237
+ text = ["Global Unachieved Merge Total (GUMT): %s\n" % self.gumt]
238
+ text.append("Global Desired Merge Total (GDMT): %s\n" % self.gdmt)
239
+ text.append("Global Wrongly-Merged Total (GWMT): %s\n" % self.gwmt)
240
+ text.append("Global Desired Non-merge Total (GDNT): %s\n" % self.gdnt)
241
+ text.append("Understemming Index (GUMT / GDMT): %s\n" % self.ui)
242
+ text.append("Overstemming Index (GWMT / GDNT): %s\n" % self.oi)
243
+ text.append("Stemming Weight (OI / UI): %s\n" % self.sw)
244
+ text.append("Error-Rate Relative to Truncation (ERRT): %s\r\n" % self.errt)
245
+ coordinates = " ".join(["(%s, %s)" % item for item in self.coords])
246
+ text.append("Truncation line: %s" % coordinates)
247
+ return "".join(text)
248
+
249
+ def _get_truncation_indexes(self, words, cutlength):
250
+ """Count (UI, OI) when stemming is done by truncating words at \'cutlength\'.
251
+
252
+ :param words: Words used for the analysis
253
+ :param cutlength: Words are stemmed by cutting them at this length
254
+ :type words: set(str) or list(str)
255
+ :type cutlength: int
256
+ :return: Understemming and overstemming indexes
257
+ :rtype: tuple(int, int)
258
+ """
259
+
260
+ truncated = _truncate(words, cutlength)
261
+ gumt, gdmt, gwmt, gdnt = _calculate(self.lemmas, truncated)
262
+ ui, oi = _indexes(gumt, gdmt, gwmt, gdnt)[:2]
263
+ return (ui, oi)
264
+
265
+ def _get_truncation_coordinates(self, cutlength=0):
266
+ """Count (UI, OI) pairs for truncation points until we find the segment where (ui, oi) crosses the truncation line.
267
+
268
+ :param cutlength: Optional parameter to start counting from (ui, oi)
269
+ coordinates gotten by stemming at this length. Useful for speeding up
270
+ the calculations when you know the approximate location of the
271
+ intersection.
272
+ :type cutlength: int
273
+ :return: List of coordinate pairs that define the truncation line
274
+ :rtype: list(tuple(float, float))
275
+ """
276
+ words = get_words_from_dictionary(self.lemmas)
277
+ maxlength = max(len(word) for word in words)
278
+
279
+ # Truncate words from different points until (0, 0) - (ui, oi) segment crosses the truncation line
280
+ coords = []
281
+ while cutlength <= maxlength:
282
+ # Get (UI, OI) pair of current truncation point
283
+ pair = self._get_truncation_indexes(words, cutlength)
284
+
285
+ # Store only new coordinates so we'll have an actual
286
+ # line segment when counting the intersection point
287
+ if pair not in coords:
288
+ coords.append(pair)
289
+ if pair == (0.0, 0.0):
290
+ # Stop counting if truncation line goes through origo;
291
+ # length from origo to truncation line is 0
292
+ return coords
293
+ if len(coords) >= 2 and pair[0] > 0.0:
294
+ derivative1 = _get_derivative(coords[-2])
295
+ derivative2 = _get_derivative(coords[-1])
296
+ # Derivative of the truncation line is a decreasing value;
297
+ # when it passes Stemming Weight, we've found the segment
298
+ # of truncation line intersecting with (0, 0) - (ui, oi) segment
299
+ if derivative1 >= self.sw >= derivative2:
300
+ return coords
301
+ cutlength += 1
302
+ return coords
303
+
304
+ def _errt(self):
305
+ """Count Error-Rate Relative to Truncation (ERRT).
306
+
307
+ :return: ERRT, length of the line from origo to (UI, OI) divided by
308
+ the length of the line from origo to the point defined by the same
309
+ line when extended until the truncation line.
310
+ :rtype: float
311
+ """
312
+ # Count (UI, OI) pairs for truncation points until we find the segment where (ui, oi) crosses the truncation line
313
+ self.coords = self._get_truncation_coordinates()
314
+ if (0.0, 0.0) in self.coords:
315
+ # Truncation line goes through origo, so ERRT cannot be counted
316
+ if (self.ui, self.oi) != (0.0, 0.0):
317
+ return float("inf")
318
+ else:
319
+ return float("nan")
320
+ if (self.ui, self.oi) == (0.0, 0.0):
321
+ # (ui, oi) is origo; define errt as 0.0
322
+ return 0.0
323
+ # Count the intersection point
324
+ # Note that (self.ui, self.oi) cannot be (0.0, 0.0) and self.coords has different coordinates
325
+ # so we have actual line segments instead of a line segment and a point
326
+ intersection = _count_intersection(
327
+ ((0, 0), (self.ui, self.oi)), self.coords[-2:]
328
+ )
329
+ # Count OP (length of the line from origo to (ui, oi))
330
+ op = sqrt(self.ui**2 + self.oi**2)
331
+ # Count OT (length of the line from origo to truncation line that goes through (ui, oi))
332
+ ot = sqrt(intersection[0] ** 2 + intersection[1] ** 2)
333
+ # OP / OT tells how well the stemming algorithm works compared to just truncating words
334
+ return op / ot
335
+
336
+ def update(self):
337
+ """Update statistics after lemmas and stems have been set."""
338
+ self.gumt, self.gdmt, self.gwmt, self.gdnt = _calculate(self.lemmas, self.stems)
339
+ self.ui, self.oi, self.sw = _indexes(self.gumt, self.gdmt, self.gwmt, self.gdnt)
340
+ self.errt = self._errt()
341
+
342
+
343
+ def demo():
344
+ """Demonstration of the module."""
345
+ # Some words with their real lemmas
346
+ lemmas = {
347
+ "kneel": ["kneel", "knelt"],
348
+ "range": ["range", "ranged"],
349
+ "ring": ["ring", "rang", "rung"],
350
+ }
351
+ # Same words with stems from a stemming algorithm
352
+ stems = {
353
+ "kneel": ["kneel"],
354
+ "knelt": ["knelt"],
355
+ "rang": ["rang", "range", "ranged"],
356
+ "ring": ["ring"],
357
+ "rung": ["rung"],
358
+ }
359
+ print("Words grouped by their lemmas:")
360
+ for lemma in sorted(lemmas):
361
+ print("{} => {}".format(lemma, " ".join(lemmas[lemma])))
362
+ print()
363
+ print("Same words grouped by a stemming algorithm:")
364
+ for stem in sorted(stems):
365
+ print("{} => {}".format(stem, " ".join(stems[stem])))
366
+ print()
367
+ p = Paice(lemmas, stems)
368
+ print(p)
369
+ print()
370
+ # Let's "change" results from a stemming algorithm
371
+ stems = {
372
+ "kneel": ["kneel"],
373
+ "knelt": ["knelt"],
374
+ "rang": ["rang"],
375
+ "range": ["range", "ranged"],
376
+ "ring": ["ring"],
377
+ "rung": ["rung"],
378
+ }
379
+ print("Counting stats after changing stemming results:")
380
+ for stem in sorted(stems):
381
+ print("{} => {}".format(stem, " ".join(stems[stem])))
382
+ print()
383
+ p.stems = stems
384
+ p.update()
385
+ print(p)
386
+
387
+
388
+ if __name__ == "__main__":
389
+ demo()
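A short sketch of driving the Paice class above directly (import path inferred from the file location; the lemma/stem groupings are the same toy data used in demo()):

    from nltk.metrics.paice import Paice

    lemmas = {
        "kneel": ["kneel", "knelt"],
        "range": ["range", "ranged"],
        "ring": ["ring", "rang", "rung"],
    }
    stems = {
        "kneel": ["kneel"],
        "knelt": ["knelt"],
        "rang": ["rang", "range", "ranged"],
        "ring": ["ring"],
        "rung": ["rung"],
    }

    p = Paice(lemmas, stems)   # all statistics are computed on construction
    print(p.ui, p.oi, p.sw)    # understemming index, overstemming index, stemming weight
    print(p.errt)              # error rate relative to truncation (ERRT)
    print(p)                   # formatted summary, as printed by demo()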
llmeval-env/lib/python3.10/site-packages/nltk/metrics/scores.py ADDED
@@ -0,0 +1,228 @@
1
+ # Natural Language Toolkit: Evaluation
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import operator
10
+ from functools import reduce
11
+ from math import fabs
12
+ from random import shuffle
13
+
14
+ try:
15
+ from scipy.stats.stats import betai
16
+ except ImportError:
17
+ betai = None
18
+
19
+ from nltk.util import LazyConcatenation, LazyMap
20
+
21
+
22
+ def accuracy(reference, test):
23
+ """
24
+ Given a list of reference values and a corresponding list of test
25
+ values, return the fraction of corresponding values that are
26
+ equal. In particular, return the fraction of indices
27
+ ``0<=i<len(test)`` such that ``test[i] == reference[i]``.
28
+
29
+ :type reference: list
30
+ :param reference: An ordered list of reference values.
31
+ :type test: list
32
+ :param test: A list of values to compare against the corresponding
33
+ reference values.
34
+ :raise ValueError: If ``reference`` and ``test`` do not have the
35
+ same length.
36
+ """
37
+ if len(reference) != len(test):
38
+ raise ValueError("Lists must have the same length.")
39
+ return sum(x == y for x, y in zip(reference, test)) / len(test)
40
+
41
+
42
+ def precision(reference, test):
43
+ """
44
+ Given a set of reference values and a set of test values, return
45
+ the fraction of test values that appear in the reference set.
46
+ In particular, return card(``reference`` intersection ``test``)/card(``test``).
47
+ If ``test`` is empty, then return None.
48
+
49
+ :type reference: set
50
+ :param reference: A set of reference values.
51
+ :type test: set
52
+ :param test: A set of values to compare against the reference set.
53
+ :rtype: float or None
54
+ """
55
+ if not hasattr(reference, "intersection") or not hasattr(test, "intersection"):
56
+ raise TypeError("reference and test should be sets")
57
+
58
+ if len(test) == 0:
59
+ return None
60
+ else:
61
+ return len(reference.intersection(test)) / len(test)
62
+
63
+
64
+ def recall(reference, test):
65
+ """
66
+ Given a set of reference values and a set of test values, return
67
+ the fraction of reference values that appear in the test set.
68
+ In particular, return card(``reference`` intersection ``test``)/card(``reference``).
69
+ If ``reference`` is empty, then return None.
70
+
71
+ :type reference: set
72
+ :param reference: A set of reference values.
73
+ :type test: set
74
+ :param test: A set of values to compare against the reference set.
75
+ :rtype: float or None
76
+ """
77
+ if not hasattr(reference, "intersection") or not hasattr(test, "intersection"):
78
+ raise TypeError("reference and test should be sets")
79
+
80
+ if len(reference) == 0:
81
+ return None
82
+ else:
83
+ return len(reference.intersection(test)) / len(reference)
84
+
85
+
86
+ def f_measure(reference, test, alpha=0.5):
87
+ """
88
+ Given a set of reference values and a set of test values, return
89
+ the f-measure of the test values, when compared against the
90
+ reference values. The f-measure is the harmonic mean of the
91
+ ``precision`` and ``recall``, weighted by ``alpha``. In particular,
92
+ given the precision *p* and recall *r* defined by:
93
+
94
+ - *p* = card(``reference`` intersection ``test``)/card(``test``)
95
+ - *r* = card(``reference`` intersection ``test``)/card(``reference``)
96
+
97
+ The f-measure is:
98
+
99
+ - *1/(alpha/p + (1-alpha)/r)*
100
+
101
+ If either ``reference`` or ``test`` is empty, then ``f_measure``
102
+ returns None.
103
+
104
+ :type reference: set
105
+ :param reference: A set of reference values.
106
+ :type test: set
107
+ :param test: A set of values to compare against the reference set.
108
+ :rtype: float or None
109
+ """
110
+ p = precision(reference, test)
111
+ r = recall(reference, test)
112
+ if p is None or r is None:
113
+ return None
114
+ if p == 0 or r == 0:
115
+ return 0
116
+ return 1.0 / (alpha / p + (1 - alpha) / r)
117
+
118
+
119
+ def log_likelihood(reference, test):
120
+ """
121
+ Given a list of reference values and a corresponding list of test
122
+ probability distributions, return the average log likelihood of
123
+ the reference values, given the probability distributions.
124
+
125
+ :param reference: A list of reference values
126
+ :type reference: list
127
+ :param test: A list of probability distributions over values to
128
+ compare against the corresponding reference values.
129
+ :type test: list(ProbDistI)
130
+ """
131
+ if len(reference) != len(test):
132
+ raise ValueError("Lists must have the same length.")
133
+
134
+ # Return the average value of dist.logprob(val).
135
+ total_likelihood = sum(dist.logprob(val) for (val, dist) in zip(reference, test))
136
+ return total_likelihood / len(reference)
137
+
138
+
139
+ def approxrand(a, b, **kwargs):
140
+ """
141
+ Returns an approximate significance level between two lists of
142
+ independently generated test values.
143
+
144
+ Approximate randomization calculates significance by randomly drawing
145
+ from a sample of the possible permutations. At the limit of the number
146
+ of possible permutations, the significance level is exact. The
147
+ approximate significance level is the sample mean number of times the
148
+ statistic of the permuted lists varies from the actual statistic of
149
+ the unpermuted argument lists.
150
+
151
+ :return: a tuple containing an approximate significance level, the count
152
+ of the number of times the pseudo-statistic varied from the
153
+ actual statistic, and the number of shuffles
154
+ :rtype: tuple
155
+ :param a: a list of test values
156
+ :type a: list
157
+ :param b: another list of independently generated test values
158
+ :type b: list
159
+ """
160
+ shuffles = kwargs.get("shuffles", 999)
161
+ # there's no point in trying to shuffle beyond all possible permutations
162
+ shuffles = min(shuffles, reduce(operator.mul, range(1, len(a) + len(b) + 1)))
163
+ stat = kwargs.get("statistic", lambda lst: sum(lst) / len(lst))
164
+ verbose = kwargs.get("verbose", False)
165
+
166
+ if verbose:
167
+ print("shuffles: %d" % shuffles)
168
+
169
+ actual_stat = fabs(stat(a) - stat(b))
170
+
171
+ if verbose:
172
+ print("actual statistic: %f" % actual_stat)
173
+ print("-" * 60)
174
+
175
+ c = 1e-100
176
+ lst = LazyConcatenation([a, b])
177
+ indices = list(range(len(a) + len(b)))
178
+
179
+ for i in range(shuffles):
180
+ if verbose and i % 10 == 0:
181
+ print("shuffle: %d" % i)
182
+
183
+ shuffle(indices)
184
+
185
+ pseudo_stat_a = stat(LazyMap(lambda i: lst[i], indices[: len(a)]))
186
+ pseudo_stat_b = stat(LazyMap(lambda i: lst[i], indices[len(a) :]))
187
+ pseudo_stat = fabs(pseudo_stat_a - pseudo_stat_b)
188
+
189
+ if pseudo_stat >= actual_stat:
190
+ c += 1
191
+
192
+ if verbose and i % 10 == 0:
193
+ print("pseudo-statistic: %f" % pseudo_stat)
194
+ print("significance: %f" % ((c + 1) / (i + 1)))
195
+ print("-" * 60)
196
+
197
+ significance = (c + 1) / (shuffles + 1)
198
+
199
+ if verbose:
200
+ print("significance: %f" % significance)
201
+ if betai:
202
+ for phi in [0.01, 0.05, 0.10, 0.15, 0.25, 0.50]:
203
+ print(f"prob(phi<={phi:f}): {betai(c, shuffles, phi):f}")
204
+
205
+ return (significance, c, shuffles)
206
+
207
+
208
+ def demo():
209
+ print("-" * 75)
210
+ reference = "DET NN VB DET JJ NN NN IN DET NN".split()
211
+ test = "DET VB VB DET NN NN NN IN DET NN".split()
212
+ print("Reference =", reference)
213
+ print("Test =", test)
214
+ print("Accuracy:", accuracy(reference, test))
215
+
216
+ print("-" * 75)
217
+ reference_set = set(reference)
218
+ test_set = set(test)
219
+ print("Reference =", reference_set)
220
+ print("Test = ", test_set)
221
+ print("Precision:", precision(reference_set, test_set))
222
+ print(" Recall:", recall(reference_set, test_set))
223
+ print("F-Measure:", f_measure(reference_set, test_set))
224
+ print("-" * 75)
225
+
226
+
227
+ if __name__ == "__main__":
228
+ demo()
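As a small illustration of the list- and set-based scorers above (import path inferred from the file location; the tag data is the same as in demo()):

    from nltk.metrics.scores import accuracy, precision, recall, f_measure

    reference = "DET NN VB DET JJ NN NN IN DET NN".split()
    test = "DET VB VB DET NN NN NN IN DET NN".split()
    print(accuracy(reference, test))       # 0.8: 8 of the 10 positions match

    ref_set, test_set = set(reference), set(test)
    print(precision(ref_set, test_set))    # 1.0: every predicted tag type occurs in the reference
    print(recall(ref_set, test_set))       # 0.8: JJ is never predicted
    print(f_measure(ref_set, test_set))    # 0.888...: harmonic mean with the default alpha = 0.5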
llmeval-env/lib/python3.10/site-packages/nltk/metrics/segmentation.py ADDED
@@ -0,0 +1,222 @@
1
+ # Natural Language Toolkit: Text Segmentation Metrics
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # David Doukhan <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+
11
+ """
12
+ Text Segmentation Metrics
13
+
14
+ 1. Windowdiff
15
+
16
+ Pevzner, L., and Hearst, M., A Critique and Improvement of
17
+ an Evaluation Metric for Text Segmentation,
18
+ Computational Linguistics 28, 19-36
19
+
20
+
21
+ 2. Generalized Hamming Distance
22
+
23
+ Bookstein A., Kulyukin V.A., Raita T.
24
+ Generalized Hamming Distance
25
+ Information Retrieval 5, 2002, pp 353-375
26
+
27
+ Baseline implementation in C++
28
+ http://digital.cs.usu.edu/~vkulyukin/vkweb/software/ghd/ghd.html
29
+
30
+ Study describing benefits of Generalized Hamming Distance Versus
31
+ WindowDiff for evaluating text segmentation tasks
32
+ Begsten, Y. Quel indice pour mesurer l'efficacite en segmentation de textes ?
33
+ TALN 2009
34
+
35
+
36
+ 3. Pk text segmentation metric
37
+
38
+ Beeferman D., Berger A., Lafferty J. (1999)
39
+ Statistical Models for Text Segmentation
40
+ Machine Learning, 34, 177-210
41
+ """
42
+
43
+ try:
44
+ import numpy as np
45
+ except ImportError:
46
+ pass
47
+
48
+
49
+ def windowdiff(seg1, seg2, k, boundary="1", weighted=False):
50
+ """
51
+ Compute the windowdiff score for a pair of segmentations. A
52
+ segmentation is any sequence over a vocabulary of two items
53
+ (e.g. "0", "1"), where the specified boundary value is used to
54
+ mark the edge of a segmentation.
55
+
56
+ >>> s1 = "000100000010"
57
+ >>> s2 = "000010000100"
58
+ >>> s3 = "100000010000"
59
+ >>> '%.2f' % windowdiff(s1, s1, 3)
60
+ '0.00'
61
+ >>> '%.2f' % windowdiff(s1, s2, 3)
62
+ '0.30'
63
+ >>> '%.2f' % windowdiff(s2, s3, 3)
64
+ '0.80'
65
+
66
+ :param seg1: a segmentation
67
+ :type seg1: str or list
68
+ :param seg2: a segmentation
69
+ :type seg2: str or list
70
+ :param k: window width
71
+ :type k: int
72
+ :param boundary: boundary value
73
+ :type boundary: str or int or bool
74
+ :param weighted: use the weighted variant of windowdiff
75
+ :type weighted: boolean
76
+ :rtype: float
77
+ """
78
+
79
+ if len(seg1) != len(seg2):
80
+ raise ValueError("Segmentations have unequal length")
81
+ if k > len(seg1):
82
+ raise ValueError(
83
+ "Window width k should be smaller than or equal to the segmentation lengths"
84
+ )
85
+ wd = 0
86
+ for i in range(len(seg1) - k + 1):
87
+ ndiff = abs(seg1[i : i + k].count(boundary) - seg2[i : i + k].count(boundary))
88
+ if weighted:
89
+ wd += ndiff
90
+ else:
91
+ wd += min(1, ndiff)
92
+ return wd / (len(seg1) - k + 1.0)
93
+
94
+
95
+ # Generalized Hamming Distance
96
+
97
+
98
+ def _init_mat(nrows, ncols, ins_cost, del_cost):
99
+ mat = np.empty((nrows, ncols))
100
+ mat[0, :] = ins_cost * np.arange(ncols)
101
+ mat[:, 0] = del_cost * np.arange(nrows)
102
+ return mat
103
+
104
+
105
+ def _ghd_aux(mat, rowv, colv, ins_cost, del_cost, shift_cost_coeff):
106
+ for i, rowi in enumerate(rowv):
107
+ for j, colj in enumerate(colv):
108
+ shift_cost = shift_cost_coeff * abs(rowi - colj) + mat[i, j]
109
+ if rowi == colj:
110
+ # boundaries are at the same location, no transformation required
111
+ tcost = mat[i, j]
112
+ elif rowi > colj:
113
+ # boundary match through a deletion
114
+ tcost = del_cost + mat[i, j + 1]
115
+ else:
116
+ # boundary match through an insertion
117
+ tcost = ins_cost + mat[i + 1, j]
118
+ mat[i + 1, j + 1] = min(tcost, shift_cost)
119
+
120
+
121
+ def ghd(ref, hyp, ins_cost=2.0, del_cost=2.0, shift_cost_coeff=1.0, boundary="1"):
122
+ """
123
+ Compute the Generalized Hamming Distance for a reference and a hypothetical
124
+ segmentation, corresponding to the cost related to the transformation
125
+ of the hypothetical segmentation into the reference segmentation
126
+ through boundary insertion, deletion and shift operations.
127
+
128
+ A segmentation is any sequence over a vocabulary of two items
129
+ (e.g. "0", "1"), where the specified boundary value is used to
130
+ mark the edge of a segmentation.
131
+
132
+ Recommended parameter values are a shift_cost_coeff of 2,
133
+ associated with an ins_cost and del_cost equal to the mean segment
134
+ length in the reference segmentation.
135
+
136
+ >>> # Same examples as Kulyukin C++ implementation
137
+ >>> ghd('1100100000', '1100010000', 1.0, 1.0, 0.5)
138
+ 0.5
139
+ >>> ghd('1100100000', '1100000001', 1.0, 1.0, 0.5)
140
+ 2.0
141
+ >>> ghd('011', '110', 1.0, 1.0, 0.5)
142
+ 1.0
143
+ >>> ghd('1', '0', 1.0, 1.0, 0.5)
144
+ 1.0
145
+ >>> ghd('111', '000', 1.0, 1.0, 0.5)
146
+ 3.0
147
+ >>> ghd('000', '111', 1.0, 2.0, 0.5)
148
+ 6.0
149
+
150
+ :param ref: the reference segmentation
151
+ :type ref: str or list
152
+ :param hyp: the hypothetical segmentation
153
+ :type hyp: str or list
154
+ :param ins_cost: insertion cost
155
+ :type ins_cost: float
156
+ :param del_cost: deletion cost
157
+ :type del_cost: float
158
+ :param shift_cost_coeff: constant used to compute the cost of a shift.
159
+ ``shift cost = shift_cost_coeff * |i - j|`` where ``i`` and ``j``
160
+ are the positions indicating the shift
161
+ :type shift_cost_coeff: float
162
+ :param boundary: boundary value
163
+ :type boundary: str or int or bool
164
+ :rtype: float
165
+ """
166
+
167
+ ref_idx = [i for (i, val) in enumerate(ref) if val == boundary]
168
+ hyp_idx = [i for (i, val) in enumerate(hyp) if val == boundary]
169
+
170
+ nref_bound = len(ref_idx)
171
+ nhyp_bound = len(hyp_idx)
172
+
173
+ if nref_bound == 0 and nhyp_bound == 0:
174
+ return 0.0
175
+ elif nref_bound > 0 and nhyp_bound == 0:
176
+ return nref_bound * ins_cost
177
+ elif nref_bound == 0 and nhyp_bound > 0:
178
+ return nhyp_bound * del_cost
179
+
180
+ mat = _init_mat(nhyp_bound + 1, nref_bound + 1, ins_cost, del_cost)
181
+ _ghd_aux(mat, hyp_idx, ref_idx, ins_cost, del_cost, shift_cost_coeff)
182
+ return mat[-1, -1]
183
+
184
+
185
+ # Beeferman's Pk text segmentation evaluation metric
186
+
187
+
188
+ def pk(ref, hyp, k=None, boundary="1"):
189
+ """
190
+ Compute the Pk metric for a pair of segmentations. A segmentation
191
+ is any sequence over a vocabulary of two items (e.g. "0", "1"),
192
+ where the specified boundary value is used to mark the edge of a
193
+ segmentation.
194
+
195
+ >>> '%.2f' % pk('0100'*100, '1'*400, 2)
196
+ '0.50'
197
+ >>> '%.2f' % pk('0100'*100, '0'*400, 2)
198
+ '0.50'
199
+ >>> '%.2f' % pk('0100'*100, '0100'*100, 2)
200
+ '0.00'
201
+
202
+ :param ref: the reference segmentation
203
+ :type ref: str or list
204
+ :param hyp: the segmentation to evaluate
205
+ :type hyp: str or list
206
+ :param k: window size, if None, set to half of the average reference segment length
207
+ :type k: int
208
+ :param boundary: boundary value
209
+ :type boundary: str or int or bool
210
+ :rtype: float
211
+ """
212
+
213
+ if k is None:
214
+ k = int(round(len(ref) / (ref.count(boundary) * 2.0)))
215
+
216
+ err = 0
217
+ for i in range(len(ref) - k + 1):
218
+ r = ref[i : i + k].count(boundary) > 0
219
+ h = hyp[i : i + k].count(boundary) > 0
220
+ if r != h:
221
+ err += 1
222
+ return err / (len(ref) - k + 1.0)
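A brief sketch exercising the three segmentation metrics above (import path inferred from the file location; expected values follow the doctests, and ghd needs numpy to be available):

    from nltk.metrics.segmentation import windowdiff, ghd, pk

    s1 = "000100000010"
    s2 = "000010000100"
    print(round(windowdiff(s1, s2, 3), 2))                 # 0.3
    print(ghd("1100100000", "1100010000", 1.0, 1.0, 0.5))  # 0.5
    print(round(pk("0100" * 100, "1" * 400, 2), 2))        # 0.5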
llmeval-env/lib/python3.10/site-packages/nltk/metrics/spearman.py ADDED
@@ -0,0 +1,68 @@
1
+ # Natural Language Toolkit: Spearman Rank Correlation
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Joel Nothman <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Tools for comparing ranked lists.
10
+ """
11
+
12
+
13
+ def _rank_dists(ranks1, ranks2):
14
+ """Finds the difference between the values in ranks1 and ranks2 for keys
15
+ present in both dicts. If the arguments are not dicts, they are converted
16
+ from (key, rank) sequences.
17
+ """
18
+ ranks1 = dict(ranks1)
19
+ ranks2 = dict(ranks2)
20
+ for k in ranks1:
21
+ try:
22
+ yield k, ranks1[k] - ranks2[k]
23
+ except KeyError:
24
+ pass
25
+
26
+
27
+ def spearman_correlation(ranks1, ranks2):
28
+ """Returns the Spearman correlation coefficient for two rankings, which
29
+ should be dicts or sequences of (key, rank). The coefficient ranges from
30
+ -1.0 (ranks are opposite) to 1.0 (ranks are identical), and is only
31
+ calculated for keys in both rankings (for meaningful results, remove keys
32
+ present in only one list before ranking)."""
33
+ n = 0
34
+ res = 0
35
+ for k, d in _rank_dists(ranks1, ranks2):
36
+ res += d * d
37
+ n += 1
38
+ try:
39
+ return 1 - (6 * res / (n * (n * n - 1)))
40
+ except ZeroDivisionError:
41
+ # Result is undefined if only one item is ranked
42
+ return 0.0
43
+
44
+
45
+ def ranks_from_sequence(seq):
46
+ """Given a sequence, yields each element with an increasing rank, suitable
47
+ for use as an argument to ``spearman_correlation``.
48
+ """
49
+ return ((k, i) for i, k in enumerate(seq))
50
+
51
+
52
+ def ranks_from_scores(scores, rank_gap=1e-15):
53
+ """Given a sequence of (key, score) tuples, yields each key with an
54
+ increasing rank, tying with previous key's rank if the difference between
55
+ their scores is less than rank_gap. Suitable for use as an argument to
56
+ ``spearman_correlation``.
57
+ """
58
+ prev_score = None
59
+ rank = 0
60
+ for i, (key, score) in enumerate(scores):
61
+ try:
62
+ if abs(score - prev_score) > rank_gap:
63
+ rank = i
64
+ except TypeError:
65
+ pass
66
+
67
+ yield key, rank
68
+ prev_score = score
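A minimal sketch of how this module might be used; the example rankings are made up for illustration. ranks_from_sequence assigns ranks by position, and identical orderings yield a coefficient of 1.0:

    from nltk.metrics.spearman import ranks_from_sequence, spearman_correlation

    ranking_a = list(ranks_from_sequence(["cat", "dog", "fish", "bird"]))
    ranking_b = list(ranks_from_sequence(["cat", "dog", "bird", "fish"]))

    print(spearman_correlation(ranking_a, ranking_a))  # 1.0: identical rankings
    print(spearman_correlation(ranking_a, ranking_b))  # 0.8: last two items swapped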
llmeval-env/lib/python3.10/site-packages/nltk/misc/__init__.py ADDED
@@ -0,0 +1,11 @@
1
+ # Natural Language Toolkit: Miscellaneous modules
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ from nltk.misc.babelfish import babelize_shell
9
+ from nltk.misc.chomsky import generate_chomsky
10
+ from nltk.misc.minimalset import MinimalSet
11
+ from nltk.misc.wordfinder import word_finder
llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (409 Bytes).
 
llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/babelfish.cpython-310.pyc ADDED
Binary file (624 Bytes).
 
llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/chomsky.cpython-310.pyc ADDED
Binary file (5.12 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/minimalset.cpython-310.pyc ADDED
Binary file (2.98 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/sort.cpython-310.pyc ADDED
Binary file (3.49 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/misc/__pycache__/wordfinder.cpython-310.pyc ADDED
Binary file (4.07 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/misc/sort.py ADDED
@@ -0,0 +1,176 @@
1
+ # Natural Language Toolkit: List Sorting
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ This module provides a variety of list sorting algorithms, to
10
+ illustrate the many different algorithms (recipes) for solving a
11
+ problem, and how to analyze algorithms experimentally.
12
+ """
13
+ # These algorithms are taken from:
14
+ # Levitin (2004) The Design and Analysis of Algorithms
15
+
16
+ ##################################################################
17
+ # Selection Sort
18
+ ##################################################################
19
+
20
+
21
+ def selection(a):
22
+ """
23
+ Selection Sort: scan the list to find its smallest element, then
24
+ swap it with the first element. The remainder of the list is one
25
+ element smaller; apply the same method to this list, and so on.
26
+ """
27
+ count = 0
28
+
29
+ for i in range(len(a) - 1):
30
+ min = i
31
+
32
+ for j in range(i + 1, len(a)):
33
+ if a[j] < a[min]:
34
+ min = j
35
+
36
+ count += 1
37
+
38
+ a[min], a[i] = a[i], a[min]
39
+
40
+ return count
41
+
42
+
43
+ ##################################################################
44
+ # Bubble Sort
45
+ ##################################################################
46
+
47
+
48
+ def bubble(a):
49
+ """
50
+ Bubble Sort: compare adjacent elements of the list left-to-right,
51
+ and swap them if they are out of order. After one pass through
52
+ the list swapping adjacent items, the largest item will be in
53
+ the rightmost position. The remainder is one element smaller;
54
+ apply the same method to this list, and so on.
55
+ """
56
+ count = 0
57
+ for i in range(len(a) - 1):
58
+ for j in range(len(a) - i - 1):
59
+ if a[j + 1] < a[j]:
60
+ a[j], a[j + 1] = a[j + 1], a[j]
61
+ count += 1
62
+ return count
63
+
64
+
65
+ ##################################################################
66
+ # Merge Sort
67
+ ##################################################################
68
+
69
+
70
+ def _merge_lists(b, c):
71
+ count = 0
72
+ i = j = 0
73
+ a = []
74
+ while i < len(b) and j < len(c):
75
+ count += 1
76
+ if b[i] <= c[j]:
77
+ a.append(b[i])
78
+ i += 1
79
+ else:
80
+ a.append(c[j])
81
+ j += 1
82
+ if i == len(b):
83
+ a += c[j:]
84
+ else:
85
+ a += b[i:]
86
+ return a, count
87
+
88
+
89
+ def merge(a):
90
+ """
91
+ Merge Sort: split the list in half, and sort each half, then
92
+ combine the sorted halves.
93
+ """
94
+ count = 0
95
+ if len(a) > 1:
96
+ midpoint = len(a) // 2
97
+ b = a[:midpoint]
98
+ c = a[midpoint:]
99
+ count_b = merge(b)
100
+ count_c = merge(c)
101
+ result, count_a = _merge_lists(b, c)
102
+ a[:] = result # copy the result back into a.
103
+ count = count_a + count_b + count_c
104
+ return count
105
+
106
+
107
+ ##################################################################
108
+ # Quick Sort
109
+ ##################################################################
110
+
111
+
112
+ def _partition(a, l, r):
113
+ p = a[l]
114
+ i = l
115
+ j = r + 1
116
+ count = 0
117
+ while True:
118
+ while i < r:
119
+ i += 1
120
+ if a[i] >= p:
121
+ break
122
+ while j > l:
123
+ j -= 1
124
+ if j < l or a[j] <= p:
125
+ break
126
+ a[i], a[j] = a[j], a[i] # swap
127
+ count += 1
128
+ if i >= j:
129
+ break
130
+ a[i], a[j] = a[j], a[i] # undo last swap
131
+ a[l], a[j] = a[j], a[l]
132
+ return j, count
133
+
134
+
135
+ def _quick(a, l, r):
136
+ count = 0
137
+ if l < r:
138
+ s, count = _partition(a, l, r)
139
+ count += _quick(a, l, s - 1)
140
+ count += _quick(a, s + 1, r)
141
+ return count
142
+
143
+
144
+ def quick(a):
145
+ return _quick(a, 0, len(a) - 1)
146
+
147
+
148
+ ##################################################################
149
+ # Demonstration
150
+ ##################################################################
151
+
152
+
153
+ def demo():
154
+ from random import shuffle
155
+
156
+ for size in (10, 20, 50, 100, 200, 500, 1000):
157
+ a = list(range(size))
158
+
159
+ # various sort methods
160
+ shuffle(a)
161
+ count_selection = selection(a)
162
+ shuffle(a)
163
+ count_bubble = bubble(a)
164
+ shuffle(a)
165
+ count_merge = merge(a)
166
+ shuffle(a)
167
+ count_quick = quick(a)
168
+
169
+ print(
170
+ ("size=%5d: selection=%8d, bubble=%8d, " "merge=%6d, quick=%6d")
171
+ % (size, count_selection, count_bubble, count_merge, count_quick)
172
+ )
173
+
174
+
175
+ if __name__ == "__main__":
176
+ demo()
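A minimal sketch of how these sorting routines might be compared outside of demo(); the input list is arbitrary. Each function sorts its argument in place and returns the operation count that demo() reports:

    from random import shuffle
    from nltk.misc.sort import bubble, merge, quick, selection

    data = list(range(200))
    counts = {}
    for name, sort_fn in [("selection", selection), ("bubble", bubble),
                          ("merge", merge), ("quick", quick)]:
        shuffle(data)
        counts[name] = sort_fn(data[:])  # sort a copy, keep the count
    print(counts)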
llmeval-env/lib/python3.10/site-packages/nltk/misc/wordfinder.py ADDED
@@ -0,0 +1,139 @@
1
+ # Natural Language Toolkit: Word Finder
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ # Simplified from PHP version by Robert Klein <[email protected]>
9
+ # http://fswordfinder.sourceforge.net/
10
+
11
+ import random
12
+
13
+
14
+ # reverse a word with probability 0.5
15
+ def revword(word):
16
+ if random.randint(1, 2) == 1:
17
+ return word[::-1]
18
+ return word
19
+
20
+
21
+ # try to insert word at position x,y; direction encoded in xf,yf
22
+ def step(word, x, xf, y, yf, grid):
23
+ for i in range(len(word)):
24
+ if grid[xf(i)][yf(i)] != "" and grid[xf(i)][yf(i)] != word[i]:
25
+ return False
26
+ for i in range(len(word)):
27
+ grid[xf(i)][yf(i)] = word[i]
28
+ return True
29
+
30
+
31
+ # try to insert word at position x,y, in direction dir
32
+ def check(word, dir, x, y, grid, rows, cols):
33
+ if dir == 1:
34
+ if x - len(word) < 0 or y - len(word) < 0:
35
+ return False
36
+ return step(word, x, lambda i: x - i, y, lambda i: y - i, grid)
37
+ elif dir == 2:
38
+ if x - len(word) < 0:
39
+ return False
40
+ return step(word, x, lambda i: x - i, y, lambda i: y, grid)
41
+ elif dir == 3:
42
+ if x - len(word) < 0 or y + (len(word) - 1) >= cols:
43
+ return False
44
+ return step(word, x, lambda i: x - i, y, lambda i: y + i, grid)
45
+ elif dir == 4:
46
+ if y - len(word) < 0:
47
+ return False
48
+ return step(word, x, lambda i: x, y, lambda i: y - i, grid)
49
+
50
+
51
+ def wordfinder(words, rows=20, cols=20, attempts=50, alph="ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
52
+ """
53
+ Attempt to arrange words into a letter-grid with the specified
54
+ number of rows and columns. Try each word in several positions
55
+ and directions, until it can be fitted into the grid, or the
56
+ maximum number of allowable attempts is exceeded. Returns a tuple
57
+ consisting of the grid and the words that were successfully
58
+ placed.
59
+
60
+ :param words: the list of words to be put into the grid
61
+ :type words: list
62
+ :param rows: the number of rows in the grid
63
+ :type rows: int
64
+ :param cols: the number of columns in the grid
65
+ :type cols: int
66
+ :param attempts: the number of times to attempt placing a word
67
+ :type attempts: int
68
+ :param alph: the alphabet, to be used for filling blank cells
69
+ :type alph: list
70
+ :rtype: tuple
71
+ """
72
+
73
+ # place longer words first
74
+ words = sorted(words, key=len, reverse=True)
75
+
76
+ grid = [] # the letter grid
77
+ used = [] # the words we used
78
+
79
+ # initialize the grid
80
+ for i in range(rows):
81
+ grid.append([""] * cols)
82
+
83
+ # try to place each word
84
+ for word in words:
85
+ word = word.strip().upper() # normalize
86
+ save = word # keep a record of the word
87
+ word = revword(word)
88
+ for attempt in range(attempts):
89
+ r = random.randint(0, len(word))
90
+ dir = random.choice([1, 2, 3, 4])
91
+ x = random.randint(0, rows)
92
+ y = random.randint(0, cols)
93
+ if dir == 1:
94
+ x += r
95
+ y += r
96
+ elif dir == 2:
97
+ x += r
98
+ elif dir == 3:
99
+ x += r
100
+ y -= r
101
+ elif dir == 4:
102
+ y += r
103
+ if 0 <= x < rows and 0 <= y < cols:
104
+ if check(word, dir, x, y, grid, rows, cols):
105
+ # used.append((save, dir, x, y, word))
106
+ used.append(save)
107
+ break
108
+
109
+ # Fill up the remaining spaces
110
+ for i in range(rows):
111
+ for j in range(cols):
112
+ if grid[i][j] == "":
113
+ grid[i][j] = random.choice(alph)
114
+
115
+ return grid, used
116
+
117
+
118
+ def word_finder():
119
+ from nltk.corpus import words
120
+
121
+ wordlist = words.words()
122
+ random.shuffle(wordlist)
123
+ wordlist = wordlist[:200]
124
+ wordlist = [w for w in wordlist if 3 <= len(w) <= 12]
125
+ grid, used = wordfinder(wordlist)
126
+
127
+ print("Word Finder\n")
128
+ for i in range(len(grid)):
129
+ for j in range(len(grid[i])):
130
+ print(grid[i][j], end=" ")
131
+ print()
132
+ print()
133
+
134
+ for i in range(len(used)):
135
+ print("%d:" % (i + 1), used[i])
136
+
137
+
138
+ if __name__ == "__main__":
139
+ word_finder()
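As a quick illustration of the wordfinder API above (the word list, grid size, and random seed are arbitrary choices for this sketch):

    import random
    from nltk.misc.wordfinder import wordfinder

    random.seed(0)  # only to make the illustrative output reproducible
    grid, used = wordfinder(["python", "language", "toolkit", "corpus"],
                            rows=10, cols=10)
    for row in grid:
        print(" ".join(row))
    print("placed:", used)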
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (190 Bytes).
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_counter.cpython-310.pyc ADDED
Binary file (5.35 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_models.cpython-310.pyc ADDED
Binary file (9.26 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_preprocessing.cpython-310.pyc ADDED
Binary file (1.12 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_vocabulary.cpython-310.pyc ADDED
Binary file (6.91 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_counter.py ADDED
@@ -0,0 +1,116 @@
1
+ # Natural Language Toolkit: Language Model Unit Tests
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ilia Kurenkov <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import unittest
9
+
10
+ import pytest
11
+
12
+ from nltk import FreqDist
13
+ from nltk.lm import NgramCounter
14
+ from nltk.util import everygrams
15
+
16
+
17
+ class TestNgramCounter:
18
+ """Tests for NgramCounter that only involve lookup, no modification."""
19
+
20
+ @classmethod
21
+ def setup_class(self):
22
+ text = [list("abcd"), list("egdbe")]
23
+ self.trigram_counter = NgramCounter(
24
+ everygrams(sent, max_len=3) for sent in text
25
+ )
26
+ self.bigram_counter = NgramCounter(everygrams(sent, max_len=2) for sent in text)
27
+ self.case = unittest.TestCase()
28
+
29
+ def test_N(self):
30
+ assert self.bigram_counter.N() == 16
31
+ assert self.trigram_counter.N() == 21
32
+
33
+ def test_counter_len_changes_with_lookup(self):
34
+ assert len(self.bigram_counter) == 2
35
+ self.bigram_counter[50]
36
+ assert len(self.bigram_counter) == 3
37
+
38
+ def test_ngram_order_access_unigrams(self):
39
+ assert self.bigram_counter[1] == self.bigram_counter.unigrams
40
+
41
+ def test_ngram_conditional_freqdist(self):
42
+ case = unittest.TestCase()
43
+ expected_trigram_contexts = [
44
+ ("a", "b"),
45
+ ("b", "c"),
46
+ ("e", "g"),
47
+ ("g", "d"),
48
+ ("d", "b"),
49
+ ]
50
+ expected_bigram_contexts = [("a",), ("b",), ("d",), ("e",), ("c",), ("g",)]
51
+
52
+ bigrams = self.trigram_counter[2]
53
+ trigrams = self.trigram_counter[3]
54
+
55
+ self.case.assertCountEqual(expected_bigram_contexts, bigrams.conditions())
56
+ self.case.assertCountEqual(expected_trigram_contexts, trigrams.conditions())
57
+
58
+ def test_bigram_counts_seen_ngrams(self):
59
+ assert self.bigram_counter[["a"]]["b"] == 1
60
+ assert self.bigram_counter[["b"]]["c"] == 1
61
+
62
+ def test_bigram_counts_unseen_ngrams(self):
63
+ assert self.bigram_counter[["b"]]["z"] == 0
64
+
65
+ def test_unigram_counts_seen_words(self):
66
+ assert self.bigram_counter["b"] == 2
67
+
68
+ def test_unigram_counts_completely_unseen_words(self):
69
+ assert self.bigram_counter["z"] == 0
70
+
71
+
72
+ class TestNgramCounterTraining:
73
+ @classmethod
74
+ def setup_class(self):
75
+ self.counter = NgramCounter()
76
+ self.case = unittest.TestCase()
77
+
78
+ @pytest.mark.parametrize("case", ["", [], None])
79
+ def test_empty_inputs(self, case):
80
+ test = NgramCounter(case)
81
+ assert 2 not in test
82
+ assert test[1] == FreqDist()
83
+
84
+ def test_train_on_unigrams(self):
85
+ words = list("abcd")
86
+ counter = NgramCounter([[(w,) for w in words]])
87
+
88
+ assert not counter[3]
89
+ assert not counter[2]
90
+ self.case.assertCountEqual(words, counter[1].keys())
91
+
92
+ def test_train_on_illegal_sentences(self):
93
+ str_sent = ["Check", "this", "out", "!"]
94
+ list_sent = [["Check", "this"], ["this", "out"], ["out", "!"]]
95
+
96
+ with pytest.raises(TypeError):
97
+ NgramCounter([str_sent])
98
+
99
+ with pytest.raises(TypeError):
100
+ NgramCounter([list_sent])
101
+
102
+ def test_train_on_bigrams(self):
103
+ bigram_sent = [("a", "b"), ("c", "d")]
104
+ counter = NgramCounter([bigram_sent])
105
+ assert not bool(counter[3])
106
+
107
+ def test_train_on_mix(self):
108
+ mixed_sent = [("a", "b"), ("c", "d"), ("e", "f", "g"), ("h",)]
109
+ counter = NgramCounter([mixed_sent])
110
+ unigrams = ["h"]
111
+ bigram_contexts = [("a",), ("c",)]
112
+ trigram_contexts = [("e", "f")]
113
+
114
+ self.case.assertCountEqual(unigrams, counter[1].keys())
115
+ self.case.assertCountEqual(bigram_contexts, counter[2].keys())
116
+ self.case.assertCountEqual(trigram_contexts, counter[3].keys())
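The behaviour exercised by these tests can be reproduced interactively; a minimal sketch using the same toy text as setup_class:

    from nltk.lm import NgramCounter
    from nltk.util import everygrams

    text = [list("abcd"), list("egdbe")]
    counter = NgramCounter(everygrams(sent, max_len=2) for sent in text)

    print(counter.N())          # 16, as asserted in test_N
    print(counter[["a"]]["b"])  # 1: the bigram ('a', 'b') was seen once
    print(counter["b"])         # 2: unigram count for 'b'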
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_models.py ADDED
@@ -0,0 +1,610 @@
1
+ # Natural Language Toolkit: Language Model Unit Tests
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ilia Kurenkov <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ import math
8
+ from operator import itemgetter
9
+
10
+ import pytest
11
+
12
+ from nltk.lm import (
13
+ MLE,
14
+ AbsoluteDiscountingInterpolated,
15
+ KneserNeyInterpolated,
16
+ Laplace,
17
+ Lidstone,
18
+ StupidBackoff,
19
+ Vocabulary,
20
+ WittenBellInterpolated,
21
+ )
22
+ from nltk.lm.preprocessing import padded_everygrams
23
+
24
+
25
+ @pytest.fixture(scope="session")
26
+ def vocabulary():
27
+ return Vocabulary(["a", "b", "c", "d", "z", "<s>", "</s>"], unk_cutoff=1)
28
+
29
+
30
+ @pytest.fixture(scope="session")
31
+ def training_data():
32
+ return [["a", "b", "c", "d"], ["e", "g", "a", "d", "b", "e"]]
33
+
34
+
35
+ @pytest.fixture(scope="session")
36
+ def bigram_training_data(training_data):
37
+ return [list(padded_everygrams(2, sent)) for sent in training_data]
38
+
39
+
40
+ @pytest.fixture(scope="session")
41
+ def trigram_training_data(training_data):
42
+ return [list(padded_everygrams(3, sent)) for sent in training_data]
43
+
44
+
45
+ @pytest.fixture
46
+ def mle_bigram_model(vocabulary, bigram_training_data):
47
+ model = MLE(2, vocabulary=vocabulary)
48
+ model.fit(bigram_training_data)
49
+ return model
50
+
51
+
52
+ @pytest.mark.parametrize(
53
+ "word, context, expected_score",
54
+ [
55
+ ("d", ["c"], 1),
56
+ # Unseen ngrams should yield 0
57
+ ("d", ["e"], 0),
58
+ # Unigrams should also be 0
59
+ ("z", None, 0),
60
+ # N unigrams = 14
61
+ # count('a') = 2
62
+ ("a", None, 2.0 / 14),
63
+ # count('y') = 3
64
+ ("y", None, 3.0 / 14),
65
+ ],
66
+ )
67
+ def test_mle_bigram_scores(mle_bigram_model, word, context, expected_score):
68
+ assert pytest.approx(mle_bigram_model.score(word, context), 1e-4) == expected_score
69
+
70
+
71
+ def test_mle_bigram_logscore_for_zero_score(mle_bigram_model):
72
+ assert math.isinf(mle_bigram_model.logscore("d", ["e"]))
73
+
74
+
75
+ def test_mle_bigram_entropy_perplexity_seen(mle_bigram_model):
76
+ # ngrams seen during training
77
+ trained = [
78
+ ("<s>", "a"),
79
+ ("a", "b"),
80
+ ("b", "<UNK>"),
81
+ ("<UNK>", "a"),
82
+ ("a", "d"),
83
+ ("d", "</s>"),
84
+ ]
85
+ # Ngram = Log score
86
+ # <s>, a = -1
87
+ # a, b = -1
88
+ # b, UNK = -1
89
+ # UNK, a = -1.585
90
+ # a, d = -1
91
+ # d, </s> = -1
92
+ # TOTAL logscores = -6.585
93
+ # - AVG logscores = 1.0975
94
+ H = 1.0975
95
+ perplexity = 2.1398
96
+ assert pytest.approx(mle_bigram_model.entropy(trained), 1e-4) == H
97
+ assert pytest.approx(mle_bigram_model.perplexity(trained), 1e-4) == perplexity
98
+
99
+
100
+ def test_mle_bigram_entropy_perplexity_unseen(mle_bigram_model):
101
+ # In MLE, even one unseen ngram should make entropy and perplexity infinite
102
+ untrained = [("<s>", "a"), ("a", "c"), ("c", "d"), ("d", "</s>")]
103
+
104
+ assert math.isinf(mle_bigram_model.entropy(untrained))
105
+ assert math.isinf(mle_bigram_model.perplexity(untrained))
106
+
107
+
108
+ def test_mle_bigram_entropy_perplexity_unigrams(mle_bigram_model):
109
+ # word = score, log score
110
+ # <s> = 0.1429, -2.8074
111
+ # a = 0.1429, -2.8074
112
+ # c = 0.0714, -3.8073
113
+ # UNK = 0.2143, -2.2224
114
+ # d = 0.1429, -2.8074
115
+ # c = 0.0714, -3.8073
116
+ # </s> = 0.1429, -2.8074
117
+ # TOTAL logscores = -21.6243
118
+ # - AVG logscores = 3.0095
119
+ H = 3.0095
120
+ perplexity = 8.0529
121
+
122
+ text = [("<s>",), ("a",), ("c",), ("-",), ("d",), ("c",), ("</s>",)]
123
+
124
+ assert pytest.approx(mle_bigram_model.entropy(text), 1e-4) == H
125
+ assert pytest.approx(mle_bigram_model.perplexity(text), 1e-4) == perplexity
126
+
127
+
128
+ @pytest.fixture
129
+ def mle_trigram_model(trigram_training_data, vocabulary):
130
+ model = MLE(order=3, vocabulary=vocabulary)
131
+ model.fit(trigram_training_data)
132
+ return model
133
+
134
+
135
+ @pytest.mark.parametrize(
136
+ "word, context, expected_score",
137
+ [
138
+ # count(d | b, c) = 1
139
+ # count(b, c) = 1
140
+ ("d", ("b", "c"), 1),
141
+ # count(d | c) = 1
142
+ # count(c) = 1
143
+ ("d", ["c"], 1),
144
+ # total number of tokens is 18, of which "a" occurred 2 times
145
+ ("a", None, 2.0 / 18),
146
+ # in vocabulary but unseen
147
+ ("z", None, 0),
148
+ # out of vocabulary should use "UNK" score
149
+ ("y", None, 3.0 / 18),
150
+ ],
151
+ )
152
+ def test_mle_trigram_scores(mle_trigram_model, word, context, expected_score):
153
+ assert pytest.approx(mle_trigram_model.score(word, context), 1e-4) == expected_score
154
+
155
+
156
+ @pytest.fixture
157
+ def lidstone_bigram_model(bigram_training_data, vocabulary):
158
+ model = Lidstone(0.1, order=2, vocabulary=vocabulary)
159
+ model.fit(bigram_training_data)
160
+ return model
161
+
162
+
163
+ @pytest.mark.parametrize(
164
+ "word, context, expected_score",
165
+ [
166
+ # count(d | c) = 1
167
+ # *count(d | c) = 1.1
168
+ # Count(w | c for w in vocab) = 1
169
+ # *Count(w | c for w in vocab) = 1.8
170
+ ("d", ["c"], 1.1 / 1.8),
171
+ # Total unigrams: 14
172
+ # Vocab size: 8
173
+ # Denominator: 14 + 0.8 = 14.8
174
+ # count("a") = 2
175
+ # *count("a") = 2.1
176
+ ("a", None, 2.1 / 14.8),
177
+ # in vocabulary but unseen
178
+ # count("z") = 0
179
+ # *count("z") = 0.1
180
+ ("z", None, 0.1 / 14.8),
181
+ # out of vocabulary should use "UNK" score
182
+ # count("<UNK>") = 3
183
+ # *count("<UNK>") = 3.1
184
+ ("y", None, 3.1 / 14.8),
185
+ ],
186
+ )
187
+ def test_lidstone_bigram_score(lidstone_bigram_model, word, context, expected_score):
188
+ assert (
189
+ pytest.approx(lidstone_bigram_model.score(word, context), 1e-4)
190
+ == expected_score
191
+ )
192
+
193
+
194
+ def test_lidstone_entropy_perplexity(lidstone_bigram_model):
195
+ text = [
196
+ ("<s>", "a"),
197
+ ("a", "c"),
198
+ ("c", "<UNK>"),
199
+ ("<UNK>", "d"),
200
+ ("d", "c"),
201
+ ("c", "</s>"),
202
+ ]
203
+ # Unlike MLE this should be able to handle completely novel ngrams
204
+ # Ngram = score, log score
205
+ # <s>, a = 0.3929, -1.3479
206
+ # a, c = 0.0357, -4.8074
207
+ # c, UNK = 0.0(5), -4.1699
208
+ # UNK, d = 0.0263, -5.2479
209
+ # d, c = 0.0357, -4.8074
210
+ # c, </s> = 0.0(5), -4.1699
211
+ # TOTAL logscore: −24.5504
212
+ # - AVG logscore: 4.0917
213
+ H = 4.0917
214
+ perplexity = 17.0504
215
+ assert pytest.approx(lidstone_bigram_model.entropy(text), 1e-4) == H
216
+ assert pytest.approx(lidstone_bigram_model.perplexity(text), 1e-4) == perplexity
217
+
218
+
219
+ @pytest.fixture
220
+ def lidstone_trigram_model(trigram_training_data, vocabulary):
221
+ model = Lidstone(0.1, order=3, vocabulary=vocabulary)
222
+ model.fit(trigram_training_data)
223
+ return model
224
+
225
+
226
+ @pytest.mark.parametrize(
227
+ "word, context, expected_score",
228
+ [
229
+ # Logic behind this is the same as for bigram model
230
+ ("d", ["c"], 1.1 / 1.8),
231
+ # if we choose a word that hasn't appeared after (b, c)
232
+ ("e", ["c"], 0.1 / 1.8),
233
+ # Trigram score now
234
+ ("d", ["b", "c"], 1.1 / 1.8),
235
+ ("e", ["b", "c"], 0.1 / 1.8),
236
+ ],
237
+ )
238
+ def test_lidstone_trigram_score(lidstone_trigram_model, word, context, expected_score):
239
+ assert (
240
+ pytest.approx(lidstone_trigram_model.score(word, context), 1e-4)
241
+ == expected_score
242
+ )
243
+
244
+
245
+ @pytest.fixture
246
+ def laplace_bigram_model(bigram_training_data, vocabulary):
247
+ model = Laplace(2, vocabulary=vocabulary)
248
+ model.fit(bigram_training_data)
249
+ return model
250
+
251
+
252
+ @pytest.mark.parametrize(
253
+ "word, context, expected_score",
254
+ [
255
+ # basic sanity-check:
256
+ # count(d | c) = 1
257
+ # *count(d | c) = 2
258
+ # Count(w | c for w in vocab) = 1
259
+ # *Count(w | c for w in vocab) = 9
260
+ ("d", ["c"], 2.0 / 9),
261
+ # Total unigrams: 14
262
+ # Vocab size: 8
263
+ # Denominator: 14 + 8 = 22
264
+ # count("a") = 2
265
+ # *count("a") = 3
266
+ ("a", None, 3.0 / 22),
267
+ # in vocabulary but unseen
268
+ # count("z") = 0
269
+ # *count("z") = 1
270
+ ("z", None, 1.0 / 22),
271
+ # out of vocabulary should use "UNK" score
272
+ # count("<UNK>") = 3
273
+ # *count("<UNK>") = 4
274
+ ("y", None, 4.0 / 22),
275
+ ],
276
+ )
277
+ def test_laplace_bigram_score(laplace_bigram_model, word, context, expected_score):
278
+ assert (
279
+ pytest.approx(laplace_bigram_model.score(word, context), 1e-4) == expected_score
280
+ )
281
+
282
+
283
+ def test_laplace_bigram_entropy_perplexity(laplace_bigram_model):
284
+ text = [
285
+ ("<s>", "a"),
286
+ ("a", "c"),
287
+ ("c", "<UNK>"),
288
+ ("<UNK>", "d"),
289
+ ("d", "c"),
290
+ ("c", "</s>"),
291
+ ]
292
+ # Unlike MLE this should be able to handle completely novel ngrams
293
+ # Ngram = score, log score
294
+ # <s>, a = 0.2, -2.3219
295
+ # a, c = 0.1, -3.3219
296
+ # c, UNK = 0.(1), -3.1699
297
+ # UNK, d = 0.(09), 3.4594
298
+ # d, c = 0.1 -3.3219
299
+ # c, </s> = 0.(1), -3.1699
300
+ # Total logscores: −18.7651
301
+ # - AVG logscores: 3.1275
302
+ H = 3.1275
303
+ perplexity = 8.7393
304
+ assert pytest.approx(laplace_bigram_model.entropy(text), 1e-4) == H
305
+ assert pytest.approx(laplace_bigram_model.perplexity(text), 1e-4) == perplexity
306
+
307
+
308
+ def test_laplace_gamma(laplace_bigram_model):
309
+ assert laplace_bigram_model.gamma == 1
310
+
311
+
312
+ @pytest.fixture
313
+ def wittenbell_trigram_model(trigram_training_data, vocabulary):
314
+ model = WittenBellInterpolated(3, vocabulary=vocabulary)
315
+ model.fit(trigram_training_data)
316
+ return model
317
+
318
+
319
+ @pytest.mark.parametrize(
320
+ "word, context, expected_score",
321
+ [
322
+ # For unigram scores by default revert to regular MLE
323
+ # Total unigrams: 18
324
+ # Vocab Size = 7
325
+ # count('c'): 1
326
+ ("c", None, 1.0 / 18),
327
+ # in vocabulary but unseen
328
+ # count("z") = 0
329
+ ("z", None, 0 / 18),
330
+ # out of vocabulary should use "UNK" score
331
+ # count("<UNK>") = 3
332
+ ("y", None, 3.0 / 18),
333
+ # 2 words follow b and b occurred a total of 2 times
334
+ # gamma(['b']) = 2 / (2 + 2) = 0.5
335
+ # mle.score('c', ['b']) = 0.5
336
+ # mle('c') = 1 / 18 = 0.055
337
+ # (1 - gamma) * mle + gamma * mle('c') ~= 0.27 + 0.055
338
+ ("c", ["b"], (1 - 0.5) * 0.5 + 0.5 * 1 / 18),
339
+ # building on that, let's try 'a b c' as the trigram
340
+ # 1 word follows 'a b' and 'a b' occurred 1 time
341
+ # gamma(['a', 'b']) = 1 / (1 + 1) = 0.5
342
+ # mle("c", ["a", "b"]) = 1
343
+ ("c", ["a", "b"], (1 - 0.5) + 0.5 * ((1 - 0.5) * 0.5 + 0.5 * 1 / 18)),
344
+ # P(c|zb)
345
+ # The ngram 'zbc' was not seen, so we use P(c|b). See issue #2332.
346
+ ("c", ["z", "b"], ((1 - 0.5) * 0.5 + 0.5 * 1 / 18)),
347
+ ],
348
+ )
349
+ def test_wittenbell_trigram_score(
350
+ wittenbell_trigram_model, word, context, expected_score
351
+ ):
352
+ assert (
353
+ pytest.approx(wittenbell_trigram_model.score(word, context), 1e-4)
354
+ == expected_score
355
+ )
356
+
357
+
358
+ ###############################################################################
359
+ # Notation Explained #
360
+ ###############################################################################
361
+ # For all subsequent calculations we use the following notation:
362
+ # 1. '*': Placeholder for any word/character. E.g. '*b' stands for
363
+ # all bigrams that end in 'b'. '*b*' stands for all trigrams that
364
+ # contain 'b' in the middle.
365
+ # 1. count(ngram): Count all instances (tokens) of an ngram.
366
+ # 1. unique(ngram): Count unique instances (types) of an ngram.
367
+
368
+
369
+ @pytest.fixture
370
+ def kneserney_trigram_model(trigram_training_data, vocabulary):
371
+ model = KneserNeyInterpolated(order=3, discount=0.75, vocabulary=vocabulary)
372
+ model.fit(trigram_training_data)
373
+ return model
374
+
375
+
376
+ @pytest.mark.parametrize(
377
+ "word, context, expected_score",
378
+ [
379
+ # P(c) = count('*c') / unique('**')
380
+ # = 1 / 14
381
+ ("c", None, 1.0 / 14),
382
+ # P(z) = count('*z') / unique('**')
383
+ # = 0 / 14
384
+ # 'z' is in the vocabulary, but it was not seen during training.
385
+ ("z", None, 0.0 / 14),
386
+ # P(y)
387
+ # Out of vocabulary should use "UNK" score.
388
+ # P(y) = P(UNK) = count('*UNK') / unique('**')
389
+ ("y", None, 3 / 14),
390
+ # We start with P(c|b)
391
+ # P(c|b) = alpha('bc') + gamma('b') * P(c)
392
+ # alpha('bc') = max(unique('*bc') - discount, 0) / unique('*b*')
393
+ # = max(1 - 0.75, 0) / 2
394
+ # = 0.125
395
+ # gamma('b') = discount * unique('b*') / unique('*b*')
396
+ # = (0.75 * 2) / 2
397
+ # = 0.75
398
+ ("c", ["b"], (0.125 + 0.75 * (1 / 14))),
399
+ # Building on that, let's try P(c|ab).
400
+ # P(c|ab) = alpha('abc') + gamma('ab') * P(c|b)
401
+ # alpha('abc') = max(count('abc') - discount, 0) / count('ab*')
402
+ # = max(1 - 0.75, 0) / 1
403
+ # = 0.25
404
+ # gamma('ab') = (discount * unique('ab*')) / count('ab*')
405
+ # = 0.75 * 1 / 1
406
+ ("c", ["a", "b"], 0.25 + 0.75 * (0.125 + 0.75 * (1 / 14))),
407
+ # P(c|zb)
408
+ # The ngram 'zbc' was not seen, so we use P(c|b). See issue #2332.
409
+ ("c", ["z", "b"], (0.125 + 0.75 * (1 / 14))),
410
+ ],
411
+ )
412
+ def test_kneserney_trigram_score(
413
+ kneserney_trigram_model, word, context, expected_score
414
+ ):
415
+ assert (
416
+ pytest.approx(kneserney_trigram_model.score(word, context), 1e-4)
417
+ == expected_score
418
+ )
419
+
420
+
421
+ @pytest.fixture
422
+ def absolute_discounting_trigram_model(trigram_training_data, vocabulary):
423
+ model = AbsoluteDiscountingInterpolated(order=3, vocabulary=vocabulary)
424
+ model.fit(trigram_training_data)
425
+ return model
426
+
427
+
428
+ @pytest.mark.parametrize(
429
+ "word, context, expected_score",
430
+ [
431
+ # For unigram scores revert to uniform
432
+ # P(c) = count('c') / count('**')
433
+ ("c", None, 1.0 / 18),
434
+ # in vocabulary but unseen
435
+ # count('z') = 0
436
+ ("z", None, 0.0 / 18),
437
+ # out of vocabulary should use "UNK" score
438
+ # count('<UNK>') = 3
439
+ ("y", None, 3 / 18),
440
+ # P(c|b) = alpha('bc') + gamma('b') * P(c)
441
+ # alpha('bc') = max(count('bc') - discount, 0) / count('b*')
442
+ # = max(1 - 0.75, 0) / 2
443
+ # = 0.125
444
+ # gamma('b') = discount * unique('b*') / count('b*')
445
+ # = (0.75 * 2) / 2
446
+ # = 0.75
447
+ ("c", ["b"], (0.125 + 0.75 * (2 / 2) * (1 / 18))),
448
+ # Building on that, let's try P(c|ab).
449
+ # P(c|ab) = alpha('abc') + gamma('ab') * P(c|b)
450
+ # alpha('abc') = max(count('abc') - discount, 0) / count('ab*')
451
+ # = max(1 - 0.75, 0) / 1
452
+ # = 0.25
453
+ # gamma('ab') = (discount * unique('ab*')) / count('ab*')
454
+ # = 0.75 * 1 / 1
455
+ ("c", ["a", "b"], 0.25 + 0.75 * (0.125 + 0.75 * (2 / 2) * (1 / 18))),
456
+ # P(c|zb)
457
+ # The ngram 'zbc' was not seen, so we use P(c|b). See issue #2332.
458
+ ("c", ["z", "b"], (0.125 + 0.75 * (2 / 2) * (1 / 18))),
459
+ ],
460
+ )
461
+ def test_absolute_discounting_trigram_score(
462
+ absolute_discounting_trigram_model, word, context, expected_score
463
+ ):
464
+ assert (
465
+ pytest.approx(absolute_discounting_trigram_model.score(word, context), 1e-4)
466
+ == expected_score
467
+ )
468
+
469
+
470
+ @pytest.fixture
471
+ def stupid_backoff_trigram_model(trigram_training_data, vocabulary):
472
+ model = StupidBackoff(order=3, vocabulary=vocabulary)
473
+ model.fit(trigram_training_data)
474
+ return model
475
+
476
+
477
+ @pytest.mark.parametrize(
478
+ "word, context, expected_score",
479
+ [
480
+ # For unigram scores revert to uniform
481
+ # total bigrams = 18
482
+ ("c", None, 1.0 / 18),
483
+ # in vocabulary but unseen
484
+ # bigrams ending with z = 0
485
+ ("z", None, 0.0 / 18),
486
+ # out of vocabulary should use "UNK" score
487
+ # count('<UNK>'): 3
488
+ ("y", None, 3 / 18),
489
+ # c follows 1 time out of 2 after b
490
+ ("c", ["b"], 1 / 2),
491
+ # c always follows ab
492
+ ("c", ["a", "b"], 1 / 1),
493
+ # The ngram 'z b c' was not seen, so we backoff to
494
+ # the score of the ngram 'b c' * smoothing factor
495
+ ("c", ["z", "b"], (0.4 * (1 / 2))),
496
+ ],
497
+ )
498
+ def test_stupid_backoff_trigram_score(
499
+ stupid_backoff_trigram_model, word, context, expected_score
500
+ ):
501
+ assert (
502
+ pytest.approx(stupid_backoff_trigram_model.score(word, context), 1e-4)
503
+ == expected_score
504
+ )
505
+
506
+
507
+ ###############################################################################
508
+ # Probability Distributions Should Sum up to Unity #
509
+ ###############################################################################
510
+
511
+
512
+ @pytest.fixture(scope="session")
513
+ def kneserney_bigram_model(bigram_training_data, vocabulary):
514
+ model = KneserNeyInterpolated(order=2, vocabulary=vocabulary)
515
+ model.fit(bigram_training_data)
516
+ return model
517
+
518
+
519
+ @pytest.mark.parametrize(
520
+ "model_fixture",
521
+ [
522
+ "mle_bigram_model",
523
+ "mle_trigram_model",
524
+ "lidstone_bigram_model",
525
+ "laplace_bigram_model",
526
+ "wittenbell_trigram_model",
527
+ "absolute_discounting_trigram_model",
528
+ "kneserney_bigram_model",
529
+ pytest.param(
530
+ "stupid_backoff_trigram_model",
531
+ marks=pytest.mark.xfail(
532
+ reason="Stupid Backoff is not a valid distribution"
533
+ ),
534
+ ),
535
+ ],
536
+ )
537
+ @pytest.mark.parametrize(
538
+ "context",
539
+ [("a",), ("c",), ("<s>",), ("b",), ("<UNK>",), ("d",), ("e",), ("r",), ("w",)],
540
+ ids=itemgetter(0),
541
+ )
542
+ def test_sums_to_1(model_fixture, context, request):
543
+ model = request.getfixturevalue(model_fixture)
544
+ scores_for_context = sum(model.score(w, context) for w in model.vocab)
545
+ assert pytest.approx(scores_for_context, 1e-7) == 1.0
546
+
547
+
548
+ ###############################################################################
549
+ # Generating Text #
550
+ ###############################################################################
551
+
552
+
553
+ def test_generate_one_no_context(mle_trigram_model):
554
+ assert mle_trigram_model.generate(random_seed=3) == "<UNK>"
555
+
556
+
557
+ def test_generate_one_from_limiting_context(mle_trigram_model):
558
+ # We don't need random_seed for contexts with only one continuation
559
+ assert mle_trigram_model.generate(text_seed=["c"]) == "d"
560
+ assert mle_trigram_model.generate(text_seed=["b", "c"]) == "d"
561
+ assert mle_trigram_model.generate(text_seed=["a", "c"]) == "d"
562
+
563
+
564
+ def test_generate_one_from_varied_context(mle_trigram_model):
565
+ # When context doesn't limit our options enough, seed the random choice
566
+ assert mle_trigram_model.generate(text_seed=("a", "<s>"), random_seed=2) == "a"
567
+
568
+
569
+ def test_generate_cycle(mle_trigram_model):
570
+ # Add a cycle to the model: bd -> b, db -> d
571
+ more_training_text = [padded_everygrams(mle_trigram_model.order, list("bdbdbd"))]
572
+
573
+ mle_trigram_model.fit(more_training_text)
574
+ # Test that we can escape the cycle
575
+ assert mle_trigram_model.generate(7, text_seed=("b", "d"), random_seed=5) == [
576
+ "b",
577
+ "d",
578
+ "b",
579
+ "d",
580
+ "b",
581
+ "d",
582
+ "</s>",
583
+ ]
584
+
585
+
586
+ def test_generate_with_text_seed(mle_trigram_model):
587
+ assert mle_trigram_model.generate(5, text_seed=("<s>", "e"), random_seed=3) == [
588
+ "<UNK>",
589
+ "a",
590
+ "d",
591
+ "b",
592
+ "<UNK>",
593
+ ]
594
+
595
+
596
+ def test_generate_oov_text_seed(mle_trigram_model):
597
+ assert mle_trigram_model.generate(
598
+ text_seed=("aliens",), random_seed=3
599
+ ) == mle_trigram_model.generate(text_seed=("<UNK>",), random_seed=3)
600
+
601
+
602
+ def test_generate_None_text_seed(mle_trigram_model):
603
+ # should crash with type error when we try to look it up in vocabulary
604
+ with pytest.raises(TypeError):
605
+ mle_trigram_model.generate(text_seed=(None,))
606
+
607
+ # This will work
608
+ assert mle_trigram_model.generate(
609
+ text_seed=None, random_seed=3
610
+ ) == mle_trigram_model.generate(random_seed=3)
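For orientation, a minimal sketch of the training workflow these model tests assume; the two-sentence corpus matches the training_data fixture, and padded_everygram_pipeline handles padding and vocabulary extraction. Note that the fixtures above build their Vocabulary explicitly with unk_cutoff=1, so the exact scores from this sketch need not match the parametrized expectations:

    from nltk.lm import MLE
    from nltk.lm.preprocessing import padded_everygram_pipeline

    text = [["a", "b", "c", "d"], ["e", "g", "a", "d", "b", "e"]]
    train, vocab = padded_everygram_pipeline(2, text)

    lm = MLE(2)
    lm.fit(train, vocab)
    print(lm.score("b", ["a"]))           # P(b | a) under maximum likelihood
    print(lm.generate(5, random_seed=3))  # sample a short continuation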
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_preprocessing.py ADDED
@@ -0,0 +1,30 @@
1
+ # Natural Language Toolkit: Language Model Unit Tests
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ilia Kurenkov <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ import unittest
8
+
9
+ from nltk.lm.preprocessing import padded_everygram_pipeline
10
+
11
+
12
+ class TestPreprocessing(unittest.TestCase):
13
+ def test_padded_everygram_pipeline(self):
14
+ expected_train = [
15
+ [
16
+ ("<s>",),
17
+ ("<s>", "a"),
18
+ ("a",),
19
+ ("a", "b"),
20
+ ("b",),
21
+ ("b", "c"),
22
+ ("c",),
23
+ ("c", "</s>"),
24
+ ("</s>",),
25
+ ]
26
+ ]
27
+ expected_vocab = ["<s>", "a", "b", "c", "</s>"]
28
+ train_data, vocab_data = padded_everygram_pipeline(2, [["a", "b", "c"]])
29
+ self.assertEqual([list(sent) for sent in train_data], expected_train)
30
+ self.assertEqual(list(vocab_data), expected_vocab)
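Worth noting when reusing this helper: both return values of padded_everygram_pipeline are lazy generators and can only be consumed once, which is why the test above materializes them before comparing. A minimal sketch:

    from nltk.lm.preprocessing import padded_everygram_pipeline

    train, vocab = padded_everygram_pipeline(2, [["a", "b", "c"]])
    train = [list(sent) for sent in train]  # materialize before reuse
    vocab = list(vocab)
    print(train[0][:3])  # [('<s>',), ('<s>', 'a'), ('a',)]
    print(vocab)         # ['<s>', 'a', 'b', 'c', '</s>']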
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/lm/test_vocabulary.py ADDED
@@ -0,0 +1,156 @@
1
+ # Natural Language Toolkit: Language Model Unit Tests
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ilia Kurenkov <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import unittest
9
+ from collections import Counter
10
+ from timeit import timeit
11
+
12
+ from nltk.lm import Vocabulary
13
+
14
+
15
+ class NgramModelVocabularyTests(unittest.TestCase):
16
+ """tests Vocabulary Class"""
17
+
18
+ @classmethod
19
+ def setUpClass(cls):
20
+ cls.vocab = Vocabulary(
21
+ ["z", "a", "b", "c", "f", "d", "e", "g", "a", "d", "b", "e", "w"],
22
+ unk_cutoff=2,
23
+ )
24
+
25
+ def test_truthiness(self):
26
+ self.assertTrue(self.vocab)
27
+
28
+ def test_cutoff_value_set_correctly(self):
29
+ self.assertEqual(self.vocab.cutoff, 2)
30
+
31
+ def test_unable_to_change_cutoff(self):
32
+ with self.assertRaises(AttributeError):
33
+ self.vocab.cutoff = 3
34
+
35
+ def test_cutoff_setter_checks_value(self):
36
+ with self.assertRaises(ValueError) as exc_info:
37
+ Vocabulary("abc", unk_cutoff=0)
38
+ expected_error_msg = "Cutoff value cannot be less than 1. Got: 0"
39
+ self.assertEqual(expected_error_msg, str(exc_info.exception))
40
+
41
+ def test_counts_set_correctly(self):
42
+ self.assertEqual(self.vocab.counts["a"], 2)
43
+ self.assertEqual(self.vocab.counts["b"], 2)
44
+ self.assertEqual(self.vocab.counts["c"], 1)
45
+
46
+ def test_membership_check_respects_cutoff(self):
47
+ # a was seen 2 times, so it should be considered part of the vocabulary
48
+ self.assertTrue("a" in self.vocab)
49
+ # "c" was seen once, it shouldn't be considered part of the vocab
50
+ self.assertFalse("c" in self.vocab)
51
+ # "z" was never seen at all, also shouldn't be considered in the vocab
52
+ self.assertFalse("z" in self.vocab)
53
+
54
+ def test_vocab_len_respects_cutoff(self):
55
+ # Vocab size is the number of unique tokens that occur at least as often
56
+ # as the cutoff value, plus 1 to account for unknown words.
57
+ self.assertEqual(5, len(self.vocab))
58
+
59
+ def test_vocab_iter_respects_cutoff(self):
60
+ vocab_counts = ["a", "b", "c", "d", "e", "f", "g", "w", "z"]
61
+ vocab_items = ["a", "b", "d", "e", "<UNK>"]
62
+
63
+ self.assertCountEqual(vocab_counts, list(self.vocab.counts.keys()))
64
+ self.assertCountEqual(vocab_items, list(self.vocab))
65
+
66
+ def test_update_empty_vocab(self):
67
+ empty = Vocabulary(unk_cutoff=2)
68
+ self.assertEqual(len(empty), 0)
69
+ self.assertFalse(empty)
70
+ self.assertIn(empty.unk_label, empty)
71
+
72
+ empty.update(list("abcde"))
73
+ self.assertIn(empty.unk_label, empty)
74
+
75
+ def test_lookup(self):
76
+ self.assertEqual(self.vocab.lookup("a"), "a")
77
+ self.assertEqual(self.vocab.lookup("c"), "<UNK>")
78
+
79
+ def test_lookup_iterables(self):
80
+ self.assertEqual(self.vocab.lookup(["a", "b"]), ("a", "b"))
81
+ self.assertEqual(self.vocab.lookup(("a", "b")), ("a", "b"))
82
+ self.assertEqual(self.vocab.lookup(("a", "c")), ("a", "<UNK>"))
83
+ self.assertEqual(
84
+ self.vocab.lookup(map(str, range(3))), ("<UNK>", "<UNK>", "<UNK>")
85
+ )
86
+
87
+ def test_lookup_empty_iterables(self):
88
+ self.assertEqual(self.vocab.lookup(()), ())
89
+ self.assertEqual(self.vocab.lookup([]), ())
90
+ self.assertEqual(self.vocab.lookup(iter([])), ())
91
+ self.assertEqual(self.vocab.lookup(n for n in range(0, 0)), ())
92
+
93
+ def test_lookup_recursive(self):
94
+ self.assertEqual(
95
+ self.vocab.lookup([["a", "b"], ["a", "c"]]), (("a", "b"), ("a", "<UNK>"))
96
+ )
97
+ self.assertEqual(self.vocab.lookup([["a", "b"], "c"]), (("a", "b"), "<UNK>"))
98
+ self.assertEqual(self.vocab.lookup([[[[["a", "b"]]]]]), ((((("a", "b"),),),),))
99
+
100
+ def test_lookup_None(self):
101
+ with self.assertRaises(TypeError):
102
+ self.vocab.lookup(None)
103
+ with self.assertRaises(TypeError):
104
+ list(self.vocab.lookup([None, None]))
105
+
106
+ def test_lookup_int(self):
107
+ with self.assertRaises(TypeError):
108
+ self.vocab.lookup(1)
109
+ with self.assertRaises(TypeError):
110
+ list(self.vocab.lookup([1, 2]))
111
+
112
+ def test_lookup_empty_str(self):
113
+ self.assertEqual(self.vocab.lookup(""), "<UNK>")
114
+
115
+ def test_eqality(self):
116
+ v1 = Vocabulary(["a", "b", "c"], unk_cutoff=1)
117
+ v2 = Vocabulary(["a", "b", "c"], unk_cutoff=1)
118
+ v3 = Vocabulary(["a", "b", "c"], unk_cutoff=1, unk_label="blah")
119
+ v4 = Vocabulary(["a", "b"], unk_cutoff=1)
120
+
121
+ self.assertEqual(v1, v2)
122
+ self.assertNotEqual(v1, v3)
123
+ self.assertNotEqual(v1, v4)
124
+
125
+ def test_str(self):
126
+ self.assertEqual(
127
+ str(self.vocab), "<Vocabulary with cutoff=2 unk_label='<UNK>' and 5 items>"
128
+ )
129
+
130
+ def test_creation_with_counter(self):
131
+ self.assertEqual(
132
+ self.vocab,
133
+ Vocabulary(
134
+ Counter(
135
+ ["z", "a", "b", "c", "f", "d", "e", "g", "a", "d", "b", "e", "w"]
136
+ ),
137
+ unk_cutoff=2,
138
+ ),
139
+ )
140
+
141
+ @unittest.skip(
142
+ reason="Test is known to be flaky as it compares (runtime) performance."
143
+ )
144
+ def test_len_is_constant(self):
145
+ # Given an obviously small and an obviously large vocabulary.
146
+ small_vocab = Vocabulary("abcde")
147
+ from nltk.corpus.europarl_raw import english
148
+
149
+ large_vocab = Vocabulary(english.words())
150
+
151
+ # If we time calling `len` on them.
152
+ small_vocab_len_time = timeit("len(small_vocab)", globals=locals())
153
+ large_vocab_len_time = timeit("len(large_vocab)", globals=locals())
154
+
155
+ # The timing should be the same order of magnitude.
156
+ self.assertAlmostEqual(small_vocab_len_time, large_vocab_len_time, places=1)
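A minimal interactive sketch of the cutoff behaviour these tests cover (the token counts are chosen only to sit on either side of the cutoff):

    from nltk.lm import Vocabulary

    vocab = Vocabulary(["a", "a", "b", "b", "c"], unk_cutoff=2)
    print("a" in vocab)              # True: seen at least unk_cutoff times
    print("c" in vocab)              # False: below the cutoff
    print(vocab.lookup(["a", "c"]))  # ('a', '<UNK>')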
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (197 Bytes).
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_bleu.cpython-310.pyc ADDED
Binary file (9.14 kB).