applied-ai-018 committed
Commit 4854382 · verified · 1 Parent(s): 3c28e9c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/INSTALLER +1 -0
  2. llmeval-env/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/LICENSE +20 -0
  3. llmeval-env/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/METADATA +66 -0
  4. llmeval-env/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/RECORD +14 -0
  5. llmeval-env/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/WHEEL +5 -0
  6. llmeval-env/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/top_level.txt +1 -0
  7. llmeval-env/lib/python3.10/site-packages/nltk/VERSION +1 -0
  8. llmeval-env/lib/python3.10/site-packages/nltk/__init__.py +209 -0
  9. llmeval-env/lib/python3.10/site-packages/nltk/book.py +213 -0
  10. llmeval-env/lib/python3.10/site-packages/nltk/ccg/__init__.py +34 -0
  11. llmeval-env/lib/python3.10/site-packages/nltk/ccg/__pycache__/__init__.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/nltk/ccg/__pycache__/api.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/nltk/ccg/__pycache__/chart.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/nltk/ccg/__pycache__/combinator.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/nltk/ccg/__pycache__/lexicon.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/nltk/ccg/__pycache__/logic.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/nltk/ccg/api.py +358 -0
  18. llmeval-env/lib/python3.10/site-packages/nltk/ccg/chart.py +480 -0
  19. llmeval-env/lib/python3.10/site-packages/nltk/ccg/combinator.py +339 -0
  20. llmeval-env/lib/python3.10/site-packages/nltk/ccg/logic.py +60 -0
  21. llmeval-env/lib/python3.10/site-packages/nltk/chunk/__init__.py +197 -0
  22. llmeval-env/lib/python3.10/site-packages/nltk/chunk/__pycache__/__init__.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/nltk/chunk/__pycache__/api.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/nltk/chunk/__pycache__/named_entity.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/nltk/chunk/__pycache__/regexp.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/nltk/chunk/__pycache__/util.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/nltk/chunk/api.py +56 -0
  28. llmeval-env/lib/python3.10/site-packages/nltk/chunk/named_entity.py +352 -0
  29. llmeval-env/lib/python3.10/site-packages/nltk/chunk/regexp.py +1475 -0
  30. llmeval-env/lib/python3.10/site-packages/nltk/chunk/util.py +643 -0
  31. llmeval-env/lib/python3.10/site-packages/nltk/cli.py +55 -0
  32. llmeval-env/lib/python3.10/site-packages/nltk/collections.py +661 -0
  33. llmeval-env/lib/python3.10/site-packages/nltk/collocations.py +412 -0
  34. llmeval-env/lib/python3.10/site-packages/nltk/compat.py +43 -0
  35. llmeval-env/lib/python3.10/site-packages/nltk/data.py +1441 -0
  36. llmeval-env/lib/python3.10/site-packages/nltk/decorators.py +251 -0
  37. llmeval-env/lib/python3.10/site-packages/nltk/downloader.py +2559 -0
  38. llmeval-env/lib/python3.10/site-packages/nltk/featstruct.py +0 -0
  39. llmeval-env/lib/python3.10/site-packages/nltk/grammar.py +1708 -0
  40. llmeval-env/lib/python3.10/site-packages/nltk/help.py +64 -0
  41. llmeval-env/lib/python3.10/site-packages/nltk/internals.py +1123 -0
  42. llmeval-env/lib/python3.10/site-packages/nltk/jsontags.py +65 -0
  43. llmeval-env/lib/python3.10/site-packages/nltk/langnames.py +730 -0
  44. llmeval-env/lib/python3.10/site-packages/nltk/lazyimport.py +142 -0
  45. llmeval-env/lib/python3.10/site-packages/nltk/probability.py +2578 -0
  46. llmeval-env/lib/python3.10/site-packages/nltk/stem/api.py +27 -0
  47. llmeval-env/lib/python3.10/site-packages/nltk/stem/util.py +25 -0
  48. llmeval-env/lib/python3.10/site-packages/nltk/text.py +779 -0
  49. llmeval-env/lib/python3.10/site-packages/nltk/tgrep.py +1039 -0
  50. llmeval-env/lib/python3.10/site-packages/nltk/toolbox.py +524 -0
llmeval-env/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
llmeval-env/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/LICENSE ADDED
@@ -0,0 +1,20 @@
+ This package contains a modified version of ca-bundle.crt:
+
+ ca-bundle.crt -- Bundle of CA Root Certificates
+
+ This is a bundle of X.509 certificates of public Certificate Authorities
+ (CA). These were automatically extracted from Mozilla's root certificates
+ file (certdata.txt). This file can be found in the mozilla source tree:
+ https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt
+ It contains the certificates in PEM format and therefore
+ can be directly used with curl / libcurl / php_curl, or with
+ an Apache+mod_ssl webserver for SSL client authentication.
+ Just configure this file as the SSLCACertificateFile.#
+
+ ***** BEGIN LICENSE BLOCK *****
+ This Source Code Form is subject to the terms of the Mozilla Public License,
+ v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
+ one at http://mozilla.org/MPL/2.0/.
+
+ ***** END LICENSE BLOCK *****
+ @(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $
llmeval-env/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/METADATA ADDED
@@ -0,0 +1,66 @@
+ Metadata-Version: 2.1
+ Name: certifi
+ Version: 2024.2.2
+ Summary: Python package for providing Mozilla's CA Bundle.
+ Home-page: https://github.com/certifi/python-certifi
+ Author: Kenneth Reitz
+ Author-email: [email protected]
+ License: MPL-2.0
+ Project-URL: Source, https://github.com/certifi/python-certifi
+ Classifier: Development Status :: 5 - Production/Stable
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
+ Classifier: Natural Language :: English
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Programming Language :: Python :: 3.6
+ Classifier: Programming Language :: Python :: 3.7
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Requires-Python: >=3.6
+ License-File: LICENSE
+
+ Certifi: Python SSL Certificates
+ ================================
+
+ Certifi provides Mozilla's carefully curated collection of Root Certificates for
+ validating the trustworthiness of SSL certificates while verifying the identity
+ of TLS hosts. It has been extracted from the `Requests`_ project.
+
+ Installation
+ ------------
+
+ ``certifi`` is available on PyPI. Simply install it with ``pip``::
+
+ $ pip install certifi
+
+ Usage
+ -----
+
+ To reference the installed certificate authority (CA) bundle, you can use the
+ built-in function::
+
+ >>> import certifi
+
+ >>> certifi.where()
+ '/usr/local/lib/python3.7/site-packages/certifi/cacert.pem'
+
+ Or from the command line::
+
+ $ python -m certifi
+ /usr/local/lib/python3.7/site-packages/certifi/cacert.pem
+
+ Enjoy!
+
+ .. _`Requests`: https://requests.readthedocs.io/en/master/
+
+ Addition/Removal of Certificates
+ --------------------------------
+
+ Certifi does not support any addition/removal or other modification of the
+ CA trust store content. This project is intended to provide a reliable and
+ highly portable root of trust to python deployments. Look to upstream projects
+ for methods to use alternate trust.
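The Usage section in the METADATA above stops at printing the bundle path returned by certifi.where(). As a brief illustrative sketch (not a file from this commit), that path is typically handed to the standard-library ssl module to build a trust store for HTTPS requests; the URL below is only a placeholder:

    import ssl
    import urllib.request

    import certifi

    # Build an SSL context whose trust roots are exactly certifi's Mozilla CA bundle.
    context = ssl.create_default_context(cafile=certifi.where())

    # Perform an HTTPS request that is verified against that bundle.
    with urllib.request.urlopen("https://example.com/", context=context) as response:
        print(response.status)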
llmeval-env/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/RECORD ADDED
@@ -0,0 +1,14 @@
+ certifi-2024.2.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ certifi-2024.2.2.dist-info/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989
+ certifi-2024.2.2.dist-info/METADATA,sha256=1noreLRChpOgeSj0uJT1mehiBl8ngh33Guc7KdvzYYM,2170
+ certifi-2024.2.2.dist-info/RECORD,,
+ certifi-2024.2.2.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ certifi-2024.2.2.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
+ certifi/__init__.py,sha256=ljtEx-EmmPpTe2SOd5Kzsujm_lUD0fKJVnE9gzce320,94
+ certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243
+ certifi/__pycache__/__init__.cpython-310.pyc,,
+ certifi/__pycache__/__main__.cpython-310.pyc,,
+ certifi/__pycache__/core.cpython-310.pyc,,
+ certifi/cacert.pem,sha256=ejR8qP724p-CtuR4U1WmY1wX-nVeCUD2XxWqj8e9f5I,292541
+ certifi/core.py,sha256=qRDDFyXVJwTB_EmoGppaXU_R9qCZvhl-EzxPMuV3nTA,4426
+ certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
llmeval-env/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.42.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
llmeval-env/lib/python3.10/site-packages/certifi-2024.2.2.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ certifi
llmeval-env/lib/python3.10/site-packages/nltk/VERSION ADDED
@@ -0,0 +1 @@
+ 3.8.1
llmeval-env/lib/python3.10/site-packages/nltk/__init__.py ADDED
@@ -0,0 +1,209 @@
1
+ # Natural Language Toolkit (NLTK)
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Authors: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ The Natural Language Toolkit (NLTK) is an open source Python library
11
+ for Natural Language Processing. A free online book is available.
12
+ (If you use the library for academic research, please cite the book.)
13
+
14
+ Steven Bird, Ewan Klein, and Edward Loper (2009).
15
+ Natural Language Processing with Python. O'Reilly Media Inc.
16
+ https://www.nltk.org/book/
17
+
18
+ isort:skip_file
19
+ """
20
+
21
+ import os
22
+
23
+ # //////////////////////////////////////////////////////
24
+ # Metadata
25
+ # //////////////////////////////////////////////////////
26
+
27
+ # Version. For each new release, the version number should be updated
28
+ # in the file VERSION.
29
+ try:
30
+ # If a VERSION file exists, use it!
31
+ version_file = os.path.join(os.path.dirname(__file__), "VERSION")
32
+ with open(version_file) as infile:
33
+ __version__ = infile.read().strip()
34
+ except NameError:
35
+ __version__ = "unknown (running code interactively?)"
36
+ except OSError as ex:
37
+ __version__ = "unknown (%s)" % ex
38
+
39
+ if __doc__ is not None: # fix for the ``python -OO``
40
+ __doc__ += "\n@version: " + __version__
41
+
42
+
43
+ # Copyright notice
44
+ __copyright__ = """\
45
+ Copyright (C) 2001-2023 NLTK Project.
46
+
47
+ Distributed and Licensed under the Apache License, Version 2.0,
48
+ which is included by reference.
49
+ """
50
+
51
+ __license__ = "Apache License, Version 2.0"
52
+ # Description of the toolkit, keywords, and the project's primary URL.
53
+ __longdescr__ = """\
54
+ The Natural Language Toolkit (NLTK) is a Python package for
55
+ natural language processing. NLTK requires Python 3.7, 3.8, 3.9, 3.10 or 3.11."""
56
+ __keywords__ = [
57
+ "NLP",
58
+ "CL",
59
+ "natural language processing",
60
+ "computational linguistics",
61
+ "parsing",
62
+ "tagging",
63
+ "tokenizing",
64
+ "syntax",
65
+ "linguistics",
66
+ "language",
67
+ "natural language",
68
+ "text analytics",
69
+ ]
70
+ __url__ = "https://www.nltk.org/"
71
+
72
+ # Maintainer, contributors, etc.
73
+ __maintainer__ = "NLTK Team"
74
+ __maintainer_email__ = "[email protected]"
75
+ __author__ = __maintainer__
76
+ __author_email__ = __maintainer_email__
77
+
78
+ # "Trove" classifiers for Python Package Index.
79
+ __classifiers__ = [
80
+ "Development Status :: 5 - Production/Stable",
81
+ "Intended Audience :: Developers",
82
+ "Intended Audience :: Education",
83
+ "Intended Audience :: Information Technology",
84
+ "Intended Audience :: Science/Research",
85
+ "License :: OSI Approved :: Apache Software License",
86
+ "Operating System :: OS Independent",
87
+ "Programming Language :: Python :: 3.7",
88
+ "Programming Language :: Python :: 3.8",
89
+ "Programming Language :: Python :: 3.9",
90
+ "Programming Language :: Python :: 3.10",
91
+ "Programming Language :: Python :: 3.11",
92
+ "Topic :: Scientific/Engineering",
93
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
94
+ "Topic :: Scientific/Engineering :: Human Machine Interfaces",
95
+ "Topic :: Scientific/Engineering :: Information Analysis",
96
+ "Topic :: Text Processing",
97
+ "Topic :: Text Processing :: Filters",
98
+ "Topic :: Text Processing :: General",
99
+ "Topic :: Text Processing :: Indexing",
100
+ "Topic :: Text Processing :: Linguistic",
101
+ ]
102
+
103
+ from nltk.internals import config_java
104
+
105
+ # support numpy from pypy
106
+ try:
107
+ import numpypy
108
+ except ImportError:
109
+ pass
110
+
111
+ # Override missing methods on environments where it cannot be used like GAE.
112
+ import subprocess
113
+
114
+ if not hasattr(subprocess, "PIPE"):
115
+
116
+ def _fake_PIPE(*args, **kwargs):
117
+ raise NotImplementedError("subprocess.PIPE is not supported.")
118
+
119
+ subprocess.PIPE = _fake_PIPE
120
+ if not hasattr(subprocess, "Popen"):
121
+
122
+ def _fake_Popen(*args, **kwargs):
123
+ raise NotImplementedError("subprocess.Popen is not supported.")
124
+
125
+ subprocess.Popen = _fake_Popen
126
+
127
+ ###########################################################
128
+ # TOP-LEVEL MODULES
129
+ ###########################################################
130
+
131
+ # Import top-level functionality into top-level namespace
132
+
133
+ from nltk.collocations import *
134
+ from nltk.decorators import decorator, memoize
135
+ from nltk.featstruct import *
136
+ from nltk.grammar import *
137
+ from nltk.probability import *
138
+ from nltk.text import *
139
+ from nltk.util import *
140
+ from nltk.jsontags import *
141
+
142
+ ###########################################################
143
+ # PACKAGES
144
+ ###########################################################
145
+
146
+ from nltk.chunk import *
147
+ from nltk.classify import *
148
+ from nltk.inference import *
149
+ from nltk.metrics import *
150
+ from nltk.parse import *
151
+ from nltk.tag import *
152
+ from nltk.tokenize import *
153
+ from nltk.translate import *
154
+ from nltk.tree import *
155
+ from nltk.sem import *
156
+ from nltk.stem import *
157
+
158
+ # Packages which can be lazily imported
159
+ # (a) we don't import *
160
+ # (b) they're slow to import or have run-time dependencies
161
+ # that can safely fail at run time
162
+
163
+ from nltk import lazyimport
164
+
165
+ app = lazyimport.LazyModule("app", locals(), globals())
166
+ chat = lazyimport.LazyModule("chat", locals(), globals())
167
+ corpus = lazyimport.LazyModule("corpus", locals(), globals())
168
+ draw = lazyimport.LazyModule("draw", locals(), globals())
169
+ toolbox = lazyimport.LazyModule("toolbox", locals(), globals())
170
+
171
+ # Optional loading
172
+
173
+ try:
174
+ import numpy
175
+ except ImportError:
176
+ pass
177
+ else:
178
+ from nltk import cluster
179
+
180
+ from nltk.downloader import download, download_shell
181
+
182
+ try:
183
+ import tkinter
184
+ except ImportError:
185
+ pass
186
+ else:
187
+ try:
188
+ from nltk.downloader import download_gui
189
+ except RuntimeError as e:
190
+ import warnings
191
+
192
+ warnings.warn(
193
+ "Corpus downloader GUI not loaded "
194
+ "(RuntimeError during import: %s)" % str(e)
195
+ )
196
+
197
+ # explicitly import all top-level modules (ensuring
198
+ # they override the same names inadvertently imported
199
+ # from a subpackage)
200
+
201
+ from nltk import ccg, chunk, classify, collocations
202
+ from nltk import data, featstruct, grammar, help, inference, metrics
203
+ from nltk import misc, parse, probability, sem, stem, wsd
204
+ from nltk import tag, tbl, text, tokenize, translate, tree, util
205
+
206
+
207
+ # FIXME: override any accidentally imported demo, see https://github.com/nltk/nltk/issues/2116
208
+ def demo():
209
+ print("To run the demo code for a module, type nltk.module.demo()")
llmeval-env/lib/python3.10/site-packages/nltk/book.py ADDED
@@ -0,0 +1,213 @@
1
+ # Natural Language Toolkit: Some texts for exploration in chapter 1 of the book
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ from nltk.corpus import (
10
+ genesis,
11
+ gutenberg,
12
+ inaugural,
13
+ nps_chat,
14
+ treebank,
15
+ webtext,
16
+ wordnet,
17
+ )
18
+ from nltk.probability import FreqDist
19
+ from nltk.text import Text
20
+ from nltk.util import bigrams
21
+
22
+ print("*** Introductory Examples for the NLTK Book ***")
23
+ print("Loading text1, ..., text9 and sent1, ..., sent9")
24
+ print("Type the name of the text or sentence to view it.")
25
+ print("Type: 'texts()' or 'sents()' to list the materials.")
26
+
27
+ text1 = Text(gutenberg.words("melville-moby_dick.txt"))
28
+ print("text1:", text1.name)
29
+
30
+ text2 = Text(gutenberg.words("austen-sense.txt"))
31
+ print("text2:", text2.name)
32
+
33
+ text3 = Text(genesis.words("english-kjv.txt"), name="The Book of Genesis")
34
+ print("text3:", text3.name)
35
+
36
+ text4 = Text(inaugural.words(), name="Inaugural Address Corpus")
37
+ print("text4:", text4.name)
38
+
39
+ text5 = Text(nps_chat.words(), name="Chat Corpus")
40
+ print("text5:", text5.name)
41
+
42
+ text6 = Text(webtext.words("grail.txt"), name="Monty Python and the Holy Grail")
43
+ print("text6:", text6.name)
44
+
45
+ text7 = Text(treebank.words(), name="Wall Street Journal")
46
+ print("text7:", text7.name)
47
+
48
+ text8 = Text(webtext.words("singles.txt"), name="Personals Corpus")
49
+ print("text8:", text8.name)
50
+
51
+ text9 = Text(gutenberg.words("chesterton-thursday.txt"))
52
+ print("text9:", text9.name)
53
+
54
+
55
+ def texts():
56
+ print("text1:", text1.name)
57
+ print("text2:", text2.name)
58
+ print("text3:", text3.name)
59
+ print("text4:", text4.name)
60
+ print("text5:", text5.name)
61
+ print("text6:", text6.name)
62
+ print("text7:", text7.name)
63
+ print("text8:", text8.name)
64
+ print("text9:", text9.name)
65
+
66
+
67
+ sent1 = ["Call", "me", "Ishmael", "."]
68
+ sent2 = [
69
+ "The",
70
+ "family",
71
+ "of",
72
+ "Dashwood",
73
+ "had",
74
+ "long",
75
+ "been",
76
+ "settled",
77
+ "in",
78
+ "Sussex",
79
+ ".",
80
+ ]
81
+ sent3 = [
82
+ "In",
83
+ "the",
84
+ "beginning",
85
+ "God",
86
+ "created",
87
+ "the",
88
+ "heaven",
89
+ "and",
90
+ "the",
91
+ "earth",
92
+ ".",
93
+ ]
94
+ sent4 = [
95
+ "Fellow",
96
+ "-",
97
+ "Citizens",
98
+ "of",
99
+ "the",
100
+ "Senate",
101
+ "and",
102
+ "of",
103
+ "the",
104
+ "House",
105
+ "of",
106
+ "Representatives",
107
+ ":",
108
+ ]
109
+ sent5 = [
110
+ "I",
111
+ "have",
112
+ "a",
113
+ "problem",
114
+ "with",
115
+ "people",
116
+ "PMing",
117
+ "me",
118
+ "to",
119
+ "lol",
120
+ "JOIN",
121
+ ]
122
+ sent6 = [
123
+ "SCENE",
124
+ "1",
125
+ ":",
126
+ "[",
127
+ "wind",
128
+ "]",
129
+ "[",
130
+ "clop",
131
+ "clop",
132
+ "clop",
133
+ "]",
134
+ "KING",
135
+ "ARTHUR",
136
+ ":",
137
+ "Whoa",
138
+ "there",
139
+ "!",
140
+ ]
141
+ sent7 = [
142
+ "Pierre",
143
+ "Vinken",
144
+ ",",
145
+ "61",
146
+ "years",
147
+ "old",
148
+ ",",
149
+ "will",
150
+ "join",
151
+ "the",
152
+ "board",
153
+ "as",
154
+ "a",
155
+ "nonexecutive",
156
+ "director",
157
+ "Nov.",
158
+ "29",
159
+ ".",
160
+ ]
161
+ sent8 = [
162
+ "25",
163
+ "SEXY",
164
+ "MALE",
165
+ ",",
166
+ "seeks",
167
+ "attrac",
168
+ "older",
169
+ "single",
170
+ "lady",
171
+ ",",
172
+ "for",
173
+ "discreet",
174
+ "encounters",
175
+ ".",
176
+ ]
177
+ sent9 = [
178
+ "THE",
179
+ "suburb",
180
+ "of",
181
+ "Saffron",
182
+ "Park",
183
+ "lay",
184
+ "on",
185
+ "the",
186
+ "sunset",
187
+ "side",
188
+ "of",
189
+ "London",
190
+ ",",
191
+ "as",
192
+ "red",
193
+ "and",
194
+ "ragged",
195
+ "as",
196
+ "a",
197
+ "cloud",
198
+ "of",
199
+ "sunset",
200
+ ".",
201
+ ]
202
+
203
+
204
+ def sents():
205
+ print("sent1:", " ".join(sent1))
206
+ print("sent2:", " ".join(sent2))
207
+ print("sent3:", " ".join(sent3))
208
+ print("sent4:", " ".join(sent4))
209
+ print("sent5:", " ".join(sent5))
210
+ print("sent6:", " ".join(sent6))
211
+ print("sent7:", " ".join(sent7))
212
+ print("sent8:", " ".join(sent8))
213
+ print("sent9:", " ".join(sent9))
llmeval-env/lib/python3.10/site-packages/nltk/ccg/__init__.py ADDED
@@ -0,0 +1,34 @@
+ # Natural Language Toolkit: Combinatory Categorial Grammar
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Graeme Gange <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+
+ """
+ Combinatory Categorial Grammar.
+
+ For more information see nltk/doc/contrib/ccg/ccg.pdf
+ """
+
+ from nltk.ccg.chart import CCGChart, CCGChartParser, CCGEdge, CCGLeafEdge
+ from nltk.ccg.combinator import (
+ BackwardApplication,
+ BackwardBx,
+ BackwardCombinator,
+ BackwardComposition,
+ BackwardSx,
+ BackwardT,
+ DirectedBinaryCombinator,
+ ForwardApplication,
+ ForwardCombinator,
+ ForwardComposition,
+ ForwardSubstitution,
+ ForwardT,
+ UndirectedBinaryCombinator,
+ UndirectedComposition,
+ UndirectedFunctionApplication,
+ UndirectedSubstitution,
+ UndirectedTypeRaise,
+ )
+ from nltk.ccg.lexicon import CCGLexicon
llmeval-env/lib/python3.10/site-packages/nltk/ccg/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (975 Bytes).
llmeval-env/lib/python3.10/site-packages/nltk/ccg/__pycache__/api.cpython-310.pyc ADDED
Binary file (11.5 kB).
llmeval-env/lib/python3.10/site-packages/nltk/ccg/__pycache__/chart.cpython-310.pyc ADDED
Binary file (12.7 kB).
llmeval-env/lib/python3.10/site-packages/nltk/ccg/__pycache__/combinator.cpython-310.pyc ADDED
Binary file (9.18 kB).
llmeval-env/lib/python3.10/site-packages/nltk/ccg/__pycache__/lexicon.cpython-310.pyc ADDED
Binary file (7.9 kB).
llmeval-env/lib/python3.10/site-packages/nltk/ccg/__pycache__/logic.cpython-310.pyc ADDED
Binary file (1.53 kB).
llmeval-env/lib/python3.10/site-packages/nltk/ccg/api.py ADDED
@@ -0,0 +1,358 @@
1
+ # Natural Language Toolkit: CCG Categories
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Graeme Gange <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ from abc import ABCMeta, abstractmethod
9
+ from functools import total_ordering
10
+
11
+ from nltk.internals import raise_unorderable_types
12
+
13
+
14
+ @total_ordering
15
+ class AbstractCCGCategory(metaclass=ABCMeta):
16
+ """
17
+ Interface for categories in combinatory grammars.
18
+ """
19
+
20
+ @abstractmethod
21
+ def is_primitive(self):
22
+ """
23
+ Returns true if the category is primitive.
24
+ """
25
+
26
+ @abstractmethod
27
+ def is_function(self):
28
+ """
29
+ Returns true if the category is a function application.
30
+ """
31
+
32
+ @abstractmethod
33
+ def is_var(self):
34
+ """
35
+ Returns true if the category is a variable.
36
+ """
37
+
38
+ @abstractmethod
39
+ def substitute(self, substitutions):
40
+ """
41
+ Takes a set of (var, category) substitutions, and replaces every
42
+ occurrence of the variable with the corresponding category.
43
+ """
44
+
45
+ @abstractmethod
46
+ def can_unify(self, other):
47
+ """
48
+ Determines whether two categories can be unified.
49
+ - Returns None if they cannot be unified
50
+ - Returns a list of necessary substitutions if they can.
51
+ """
52
+
53
+ # Utility functions: comparison, strings and hashing.
54
+ @abstractmethod
55
+ def __str__(self):
56
+ pass
57
+
58
+ def __eq__(self, other):
59
+ return (
60
+ self.__class__ is other.__class__
61
+ and self._comparison_key == other._comparison_key
62
+ )
63
+
64
+ def __ne__(self, other):
65
+ return not self == other
66
+
67
+ def __lt__(self, other):
68
+ if not isinstance(other, AbstractCCGCategory):
69
+ raise_unorderable_types("<", self, other)
70
+ if self.__class__ is other.__class__:
71
+ return self._comparison_key < other._comparison_key
72
+ else:
73
+ return self.__class__.__name__ < other.__class__.__name__
74
+
75
+ def __hash__(self):
76
+ try:
77
+ return self._hash
78
+ except AttributeError:
79
+ self._hash = hash(self._comparison_key)
80
+ return self._hash
81
+
82
+
83
+ class CCGVar(AbstractCCGCategory):
84
+ """
85
+ Class representing a variable CCG category.
86
+ Used for conjunctions (and possibly type-raising, if implemented as a
87
+ unary rule).
88
+ """
89
+
90
+ _maxID = 0
91
+
92
+ def __init__(self, prim_only=False):
93
+ """Initialize a variable (selects a new identifier)
94
+
95
+ :param prim_only: a boolean that determines whether the variable is
96
+ restricted to primitives
97
+ :type prim_only: bool
98
+ """
99
+ self._id = self.new_id()
100
+ self._prim_only = prim_only
101
+ self._comparison_key = self._id
102
+
103
+ @classmethod
104
+ def new_id(cls):
105
+ """
106
+ A class method allowing generation of unique variable identifiers.
107
+ """
108
+ cls._maxID = cls._maxID + 1
109
+ return cls._maxID - 1
110
+
111
+ @classmethod
112
+ def reset_id(cls):
113
+ cls._maxID = 0
114
+
115
+ def is_primitive(self):
116
+ return False
117
+
118
+ def is_function(self):
119
+ return False
120
+
121
+ def is_var(self):
122
+ return True
123
+
124
+ def substitute(self, substitutions):
125
+ """If there is a substitution corresponding to this variable,
126
+ return the substituted category.
127
+ """
128
+ for (var, cat) in substitutions:
129
+ if var == self:
130
+ return cat
131
+ return self
132
+
133
+ def can_unify(self, other):
134
+ """If the variable can be replaced with other
135
+ a substitution is returned.
136
+ """
137
+ if other.is_primitive() or not self._prim_only:
138
+ return [(self, other)]
139
+ return None
140
+
141
+ def id(self):
142
+ return self._id
143
+
144
+ def __str__(self):
145
+ return "_var" + str(self._id)
146
+
147
+
148
+ @total_ordering
149
+ class Direction:
150
+ """
151
+ Class representing the direction of a function application.
152
+ Also contains maintains information as to which combinators
153
+ may be used with the category.
154
+ """
155
+
156
+ def __init__(self, dir, restrictions):
157
+ self._dir = dir
158
+ self._restrs = restrictions
159
+ self._comparison_key = (dir, tuple(restrictions))
160
+
161
+ # Testing the application direction
162
+ def is_forward(self):
163
+ return self._dir == "/"
164
+
165
+ def is_backward(self):
166
+ return self._dir == "\\"
167
+
168
+ def dir(self):
169
+ return self._dir
170
+
171
+ def restrs(self):
172
+ """A list of restrictions on the combinators.
173
+ '.' denotes that permuting operations are disallowed
174
+ ',' denotes that function composition is disallowed
175
+ '_' denotes that the direction has variable restrictions.
176
+ (This is redundant in the current implementation of type-raising)
177
+ """
178
+ return self._restrs
179
+
180
+ def is_variable(self):
181
+ return self._restrs == "_"
182
+
183
+ # Unification and substitution of variable directions.
184
+ # Used only if type-raising is implemented as a unary rule, as it
185
+ # must inherit restrictions from the argument category.
186
+ def can_unify(self, other):
187
+ if other.is_variable():
188
+ return [("_", self.restrs())]
189
+ elif self.is_variable():
190
+ return [("_", other.restrs())]
191
+ else:
192
+ if self.restrs() == other.restrs():
193
+ return []
194
+ return None
195
+
196
+ def substitute(self, subs):
197
+ if not self.is_variable():
198
+ return self
199
+
200
+ for (var, restrs) in subs:
201
+ if var == "_":
202
+ return Direction(self._dir, restrs)
203
+ return self
204
+
205
+ # Testing permitted combinators
206
+ def can_compose(self):
207
+ return "," not in self._restrs
208
+
209
+ def can_cross(self):
210
+ return "." not in self._restrs
211
+
212
+ def __eq__(self, other):
213
+ return (
214
+ self.__class__ is other.__class__
215
+ and self._comparison_key == other._comparison_key
216
+ )
217
+
218
+ def __ne__(self, other):
219
+ return not self == other
220
+
221
+ def __lt__(self, other):
222
+ if not isinstance(other, Direction):
223
+ raise_unorderable_types("<", self, other)
224
+ if self.__class__ is other.__class__:
225
+ return self._comparison_key < other._comparison_key
226
+ else:
227
+ return self.__class__.__name__ < other.__class__.__name__
228
+
229
+ def __hash__(self):
230
+ try:
231
+ return self._hash
232
+ except AttributeError:
233
+ self._hash = hash(self._comparison_key)
234
+ return self._hash
235
+
236
+ def __str__(self):
237
+ r_str = ""
238
+ for r in self._restrs:
239
+ r_str = r_str + "%s" % r
240
+ return f"{self._dir}{r_str}"
241
+
242
+ # The negation operator reverses the direction of the application
243
+ def __neg__(self):
244
+ if self._dir == "/":
245
+ return Direction("\\", self._restrs)
246
+ else:
247
+ return Direction("/", self._restrs)
248
+
249
+
250
+ class PrimitiveCategory(AbstractCCGCategory):
251
+ """
252
+ Class representing primitive categories.
253
+ Takes a string representation of the category, and a
254
+ list of strings specifying the morphological subcategories.
255
+ """
256
+
257
+ def __init__(self, categ, restrictions=[]):
258
+ self._categ = categ
259
+ self._restrs = restrictions
260
+ self._comparison_key = (categ, tuple(restrictions))
261
+
262
+ def is_primitive(self):
263
+ return True
264
+
265
+ def is_function(self):
266
+ return False
267
+
268
+ def is_var(self):
269
+ return False
270
+
271
+ def restrs(self):
272
+ return self._restrs
273
+
274
+ def categ(self):
275
+ return self._categ
276
+
277
+ # Substitution does nothing to a primitive category
278
+ def substitute(self, subs):
279
+ return self
280
+
281
+ # A primitive can be unified with a class of the same
282
+ # base category, given that the other category shares all
283
+ # of its subclasses, or with a variable.
284
+ def can_unify(self, other):
285
+ if not other.is_primitive():
286
+ return None
287
+ if other.is_var():
288
+ return [(other, self)]
289
+ if other.categ() == self.categ():
290
+ for restr in self._restrs:
291
+ if restr not in other.restrs():
292
+ return None
293
+ return []
294
+ return None
295
+
296
+ def __str__(self):
297
+ if self._restrs == []:
298
+ return "%s" % self._categ
299
+ restrictions = "[%s]" % ",".join(repr(r) for r in self._restrs)
300
+ return f"{self._categ}{restrictions}"
301
+
302
+
303
+ class FunctionalCategory(AbstractCCGCategory):
304
+ """
305
+ Class that represents a function application category.
306
+ Consists of argument and result categories, together with
307
+ an application direction.
308
+ """
309
+
310
+ def __init__(self, res, arg, dir):
311
+ self._res = res
312
+ self._arg = arg
313
+ self._dir = dir
314
+ self._comparison_key = (arg, dir, res)
315
+
316
+ def is_primitive(self):
317
+ return False
318
+
319
+ def is_function(self):
320
+ return True
321
+
322
+ def is_var(self):
323
+ return False
324
+
325
+ # Substitution returns the category consisting of the
326
+ # substitution applied to each of its constituents.
327
+ def substitute(self, subs):
328
+ sub_res = self._res.substitute(subs)
329
+ sub_dir = self._dir.substitute(subs)
330
+ sub_arg = self._arg.substitute(subs)
331
+ return FunctionalCategory(sub_res, sub_arg, self._dir)
332
+
333
+ # A function can unify with another function, so long as its
334
+ # constituents can unify, or with an unrestricted variable.
335
+ def can_unify(self, other):
336
+ if other.is_var():
337
+ return [(other, self)]
338
+ if other.is_function():
339
+ sa = self._res.can_unify(other.res())
340
+ sd = self._dir.can_unify(other.dir())
341
+ if sa is not None and sd is not None:
342
+ sb = self._arg.substitute(sa).can_unify(other.arg().substitute(sa))
343
+ if sb is not None:
344
+ return sa + sb
345
+ return None
346
+
347
+ # Constituent accessors
348
+ def arg(self):
349
+ return self._arg
350
+
351
+ def res(self):
352
+ return self._res
353
+
354
+ def dir(self):
355
+ return self._dir
356
+
357
+ def __str__(self):
358
+ return f"({self._res}{self._dir}{self._arg})"
llmeval-env/lib/python3.10/site-packages/nltk/ccg/chart.py ADDED
@@ -0,0 +1,480 @@
1
+ # Natural Language Toolkit: Combinatory Categorial Grammar
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Graeme Gange <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ The lexicon is constructed by calling
10
+ ``lexicon.fromstring(<lexicon string>)``.
11
+
12
+ In order to construct a parser, you also need a rule set.
13
+ The standard English rules are provided in chart as
14
+ ``chart.DefaultRuleSet``.
15
+
16
+ The parser can then be constructed by calling, for example:
17
+ ``parser = chart.CCGChartParser(<lexicon>, <ruleset>)``
18
+
19
+ Parsing is then performed by running
20
+ ``parser.parse(<sentence>.split())``.
21
+
22
+ While this returns a list of trees, the default representation
23
+ of the produced trees is not very enlightening, particularly
24
+ given that it uses the same tree class as the CFG parsers.
25
+ It is probably better to call:
26
+ ``chart.printCCGDerivation(<parse tree extracted from list>)``
27
+ which should print a nice representation of the derivation.
28
+
29
+ This entire process is shown far more clearly in the demonstration:
30
+ python chart.py
31
+ """
32
+
33
+ import itertools
34
+
35
+ from nltk.ccg.combinator import *
36
+ from nltk.ccg.combinator import (
37
+ BackwardApplication,
38
+ BackwardBx,
39
+ BackwardComposition,
40
+ BackwardSx,
41
+ BackwardT,
42
+ ForwardApplication,
43
+ ForwardComposition,
44
+ ForwardSubstitution,
45
+ ForwardT,
46
+ )
47
+ from nltk.ccg.lexicon import Token, fromstring
48
+ from nltk.ccg.logic import *
49
+ from nltk.parse import ParserI
50
+ from nltk.parse.chart import AbstractChartRule, Chart, EdgeI
51
+ from nltk.sem.logic import *
52
+ from nltk.tree import Tree
53
+
54
+
55
+ # Based on the EdgeI class from NLTK.
56
+ # A number of the properties of the EdgeI interface don't
57
+ # transfer well to CCGs, however.
58
+ class CCGEdge(EdgeI):
59
+ def __init__(self, span, categ, rule):
60
+ self._span = span
61
+ self._categ = categ
62
+ self._rule = rule
63
+ self._comparison_key = (span, categ, rule)
64
+
65
+ # Accessors
66
+ def lhs(self):
67
+ return self._categ
68
+
69
+ def span(self):
70
+ return self._span
71
+
72
+ def start(self):
73
+ return self._span[0]
74
+
75
+ def end(self):
76
+ return self._span[1]
77
+
78
+ def length(self):
79
+ return self._span[1] - self.span[0]
80
+
81
+ def rhs(self):
82
+ return ()
83
+
84
+ def dot(self):
85
+ return 0
86
+
87
+ def is_complete(self):
88
+ return True
89
+
90
+ def is_incomplete(self):
91
+ return False
92
+
93
+ def nextsym(self):
94
+ return None
95
+
96
+ def categ(self):
97
+ return self._categ
98
+
99
+ def rule(self):
100
+ return self._rule
101
+
102
+
103
+ class CCGLeafEdge(EdgeI):
104
+ """
105
+ Class representing leaf edges in a CCG derivation.
106
+ """
107
+
108
+ def __init__(self, pos, token, leaf):
109
+ self._pos = pos
110
+ self._token = token
111
+ self._leaf = leaf
112
+ self._comparison_key = (pos, token.categ(), leaf)
113
+
114
+ # Accessors
115
+ def lhs(self):
116
+ return self._token.categ()
117
+
118
+ def span(self):
119
+ return (self._pos, self._pos + 1)
120
+
121
+ def start(self):
122
+ return self._pos
123
+
124
+ def end(self):
125
+ return self._pos + 1
126
+
127
+ def length(self):
128
+ return 1
129
+
130
+ def rhs(self):
131
+ return self._leaf
132
+
133
+ def dot(self):
134
+ return 0
135
+
136
+ def is_complete(self):
137
+ return True
138
+
139
+ def is_incomplete(self):
140
+ return False
141
+
142
+ def nextsym(self):
143
+ return None
144
+
145
+ def token(self):
146
+ return self._token
147
+
148
+ def categ(self):
149
+ return self._token.categ()
150
+
151
+ def leaf(self):
152
+ return self._leaf
153
+
154
+
155
+ class BinaryCombinatorRule(AbstractChartRule):
156
+ """
157
+ Class implementing application of a binary combinator to a chart.
158
+ Takes the directed combinator to apply.
159
+ """
160
+
161
+ NUMEDGES = 2
162
+
163
+ def __init__(self, combinator):
164
+ self._combinator = combinator
165
+
166
+ # Apply a combinator
167
+ def apply(self, chart, grammar, left_edge, right_edge):
168
+ # The left & right edges must be touching.
169
+ if not (left_edge.end() == right_edge.start()):
170
+ return
171
+
172
+ # Check if the two edges are permitted to combine.
173
+ # If so, generate the corresponding edge.
174
+ if self._combinator.can_combine(left_edge.categ(), right_edge.categ()):
175
+ for res in self._combinator.combine(left_edge.categ(), right_edge.categ()):
176
+ new_edge = CCGEdge(
177
+ span=(left_edge.start(), right_edge.end()),
178
+ categ=res,
179
+ rule=self._combinator,
180
+ )
181
+ if chart.insert(new_edge, (left_edge, right_edge)):
182
+ yield new_edge
183
+
184
+ # The representation of the combinator (for printing derivations)
185
+ def __str__(self):
186
+ return "%s" % self._combinator
187
+
188
+
189
+ # Type-raising must be handled slightly differently to the other rules, as the
190
+ # resulting rules only span a single edge, rather than both edges.
191
+
192
+
193
+ class ForwardTypeRaiseRule(AbstractChartRule):
194
+ """
195
+ Class for applying forward type raising
196
+ """
197
+
198
+ NUMEDGES = 2
199
+
200
+ def __init__(self):
201
+ self._combinator = ForwardT
202
+
203
+ def apply(self, chart, grammar, left_edge, right_edge):
204
+ if not (left_edge.end() == right_edge.start()):
205
+ return
206
+
207
+ for res in self._combinator.combine(left_edge.categ(), right_edge.categ()):
208
+ new_edge = CCGEdge(span=left_edge.span(), categ=res, rule=self._combinator)
209
+ if chart.insert(new_edge, (left_edge,)):
210
+ yield new_edge
211
+
212
+ def __str__(self):
213
+ return "%s" % self._combinator
214
+
215
+
216
+ class BackwardTypeRaiseRule(AbstractChartRule):
217
+ """
218
+ Class for applying backward type raising.
219
+ """
220
+
221
+ NUMEDGES = 2
222
+
223
+ def __init__(self):
224
+ self._combinator = BackwardT
225
+
226
+ def apply(self, chart, grammar, left_edge, right_edge):
227
+ if not (left_edge.end() == right_edge.start()):
228
+ return
229
+
230
+ for res in self._combinator.combine(left_edge.categ(), right_edge.categ()):
231
+ new_edge = CCGEdge(span=right_edge.span(), categ=res, rule=self._combinator)
232
+ if chart.insert(new_edge, (right_edge,)):
233
+ yield new_edge
234
+
235
+ def __str__(self):
236
+ return "%s" % self._combinator
237
+
238
+
239
+ # Common sets of combinators used for English derivations.
240
+ ApplicationRuleSet = [
241
+ BinaryCombinatorRule(ForwardApplication),
242
+ BinaryCombinatorRule(BackwardApplication),
243
+ ]
244
+ CompositionRuleSet = [
245
+ BinaryCombinatorRule(ForwardComposition),
246
+ BinaryCombinatorRule(BackwardComposition),
247
+ BinaryCombinatorRule(BackwardBx),
248
+ ]
249
+ SubstitutionRuleSet = [
250
+ BinaryCombinatorRule(ForwardSubstitution),
251
+ BinaryCombinatorRule(BackwardSx),
252
+ ]
253
+ TypeRaiseRuleSet = [ForwardTypeRaiseRule(), BackwardTypeRaiseRule()]
254
+
255
+ # The standard English rule set.
256
+ DefaultRuleSet = (
257
+ ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet + TypeRaiseRuleSet
258
+ )
259
+
260
+
261
+ class CCGChartParser(ParserI):
262
+ """
263
+ Chart parser for CCGs.
264
+ Based largely on the ChartParser class from NLTK.
265
+ """
266
+
267
+ def __init__(self, lexicon, rules, trace=0):
268
+ self._lexicon = lexicon
269
+ self._rules = rules
270
+ self._trace = trace
271
+
272
+ def lexicon(self):
273
+ return self._lexicon
274
+
275
+ # Implements the CYK algorithm
276
+ def parse(self, tokens):
277
+ tokens = list(tokens)
278
+ chart = CCGChart(list(tokens))
279
+ lex = self._lexicon
280
+
281
+ # Initialize leaf edges.
282
+ for index in range(chart.num_leaves()):
283
+ for token in lex.categories(chart.leaf(index)):
284
+ new_edge = CCGLeafEdge(index, token, chart.leaf(index))
285
+ chart.insert(new_edge, ())
286
+
287
+ # Select a span for the new edges
288
+ for span in range(2, chart.num_leaves() + 1):
289
+ for start in range(0, chart.num_leaves() - span + 1):
290
+ # Try all possible pairs of edges that could generate
291
+ # an edge for that span
292
+ for part in range(1, span):
293
+ lstart = start
294
+ mid = start + part
295
+ rend = start + span
296
+
297
+ for left in chart.select(span=(lstart, mid)):
298
+ for right in chart.select(span=(mid, rend)):
299
+ # Generate all possible combinations of the two edges
300
+ for rule in self._rules:
301
+ edges_added_by_rule = 0
302
+ for newedge in rule.apply(chart, lex, left, right):
303
+ edges_added_by_rule += 1
304
+
305
+ # Output the resulting parses
306
+ return chart.parses(lex.start())
307
+
308
+
309
+ class CCGChart(Chart):
310
+ def __init__(self, tokens):
311
+ Chart.__init__(self, tokens)
312
+
313
+ # Constructs the trees for a given parse. Unfortnunately, the parse trees need to be
314
+ # constructed slightly differently to those in the default Chart class, so it has to
315
+ # be reimplemented
316
+ def _trees(self, edge, complete, memo, tree_class):
317
+ assert complete, "CCGChart cannot build incomplete trees"
318
+
319
+ if edge in memo:
320
+ return memo[edge]
321
+
322
+ if isinstance(edge, CCGLeafEdge):
323
+ word = tree_class(edge.token(), [self._tokens[edge.start()]])
324
+ leaf = tree_class((edge.token(), "Leaf"), [word])
325
+ memo[edge] = [leaf]
326
+ return [leaf]
327
+
328
+ memo[edge] = []
329
+ trees = []
330
+
331
+ for cpl in self.child_pointer_lists(edge):
332
+ child_choices = [self._trees(cp, complete, memo, tree_class) for cp in cpl]
333
+ for children in itertools.product(*child_choices):
334
+ lhs = (
335
+ Token(
336
+ self._tokens[edge.start() : edge.end()],
337
+ edge.lhs(),
338
+ compute_semantics(children, edge),
339
+ ),
340
+ str(edge.rule()),
341
+ )
342
+ trees.append(tree_class(lhs, children))
343
+
344
+ memo[edge] = trees
345
+ return trees
346
+
347
+
348
+ def compute_semantics(children, edge):
349
+ if children[0].label()[0].semantics() is None:
350
+ return None
351
+
352
+ if len(children) == 2:
353
+ if isinstance(edge.rule(), BackwardCombinator):
354
+ children = [children[1], children[0]]
355
+
356
+ combinator = edge.rule()._combinator
357
+ function = children[0].label()[0].semantics()
358
+ argument = children[1].label()[0].semantics()
359
+
360
+ if isinstance(combinator, UndirectedFunctionApplication):
361
+ return compute_function_semantics(function, argument)
362
+ elif isinstance(combinator, UndirectedComposition):
363
+ return compute_composition_semantics(function, argument)
364
+ elif isinstance(combinator, UndirectedSubstitution):
365
+ return compute_substitution_semantics(function, argument)
366
+ else:
367
+ raise AssertionError("Unsupported combinator '" + combinator + "'")
368
+ else:
369
+ return compute_type_raised_semantics(children[0].label()[0].semantics())
370
+
371
+
372
+ # --------
373
+ # Displaying derivations
374
+ # --------
375
+ def printCCGDerivation(tree):
376
+ # Get the leaves and initial categories
377
+ leafcats = tree.pos()
378
+ leafstr = ""
379
+ catstr = ""
380
+
381
+ # Construct a string with both the leaf word and corresponding
382
+ # category aligned.
383
+ for (leaf, cat) in leafcats:
384
+ str_cat = "%s" % cat
385
+ nextlen = 2 + max(len(leaf), len(str_cat))
386
+ lcatlen = (nextlen - len(str_cat)) // 2
387
+ rcatlen = lcatlen + (nextlen - len(str_cat)) % 2
388
+ catstr += " " * lcatlen + str_cat + " " * rcatlen
389
+ lleaflen = (nextlen - len(leaf)) // 2
390
+ rleaflen = lleaflen + (nextlen - len(leaf)) % 2
391
+ leafstr += " " * lleaflen + leaf + " " * rleaflen
392
+ print(leafstr.rstrip())
393
+ print(catstr.rstrip())
394
+
395
+ # Display the derivation steps
396
+ printCCGTree(0, tree)
397
+
398
+
399
+ # Prints the sequence of derivation steps.
400
+ def printCCGTree(lwidth, tree):
401
+ rwidth = lwidth
402
+
403
+ # Is a leaf (word).
404
+ # Increment the span by the space occupied by the leaf.
405
+ if not isinstance(tree, Tree):
406
+ return 2 + lwidth + len(tree)
407
+
408
+ # Find the width of the current derivation step
409
+ for child in tree:
410
+ rwidth = max(rwidth, printCCGTree(rwidth, child))
411
+
412
+ # Is a leaf node.
413
+ # Don't print anything, but account for the space occupied.
414
+ if not isinstance(tree.label(), tuple):
415
+ return max(
416
+ rwidth, 2 + lwidth + len("%s" % tree.label()), 2 + lwidth + len(tree[0])
417
+ )
418
+
419
+ (token, op) = tree.label()
420
+
421
+ if op == "Leaf":
422
+ return rwidth
423
+
424
+ # Pad to the left with spaces, followed by a sequence of '-'
425
+ # and the derivation rule.
426
+ print(lwidth * " " + (rwidth - lwidth) * "-" + "%s" % op)
427
+ # Print the resulting category on a new line.
428
+ str_res = "%s" % (token.categ())
429
+ if token.semantics() is not None:
430
+ str_res += " {" + str(token.semantics()) + "}"
431
+ respadlen = (rwidth - lwidth - len(str_res)) // 2 + lwidth
432
+ print(respadlen * " " + str_res)
433
+ return rwidth
434
+
435
+
436
+ ### Demonstration code
437
+
438
+ # Construct the lexicon
439
+ lex = fromstring(
440
+ """
441
+ :- S, NP, N, VP # Primitive categories, S is the target primitive
442
+
443
+ Det :: NP/N # Family of words
444
+ Pro :: NP
445
+ TV :: VP/NP
446
+ Modal :: (S\\NP)/VP # Backslashes need to be escaped
447
+
448
+ I => Pro # Word -> Category mapping
449
+ you => Pro
450
+
451
+ the => Det
452
+
453
+ # Variables have the special keyword 'var'
454
+ # '.' prevents permutation
455
+ # ',' prevents composition
456
+ and => var\\.,var/.,var
457
+
458
+ which => (N\\N)/(S/NP)
459
+
460
+ will => Modal # Categories can be either explicit, or families.
461
+ might => Modal
462
+
463
+ cook => TV
464
+ eat => TV
465
+
466
+ mushrooms => N
467
+ parsnips => N
468
+ bacon => N
469
+ """
470
+ )
471
+
472
+
473
+ def demo():
474
+ parser = CCGChartParser(lex, DefaultRuleSet)
475
+ for parse in parser.parse("I might cook and eat the bacon".split()):
476
+ printCCGDerivation(parse)
477
+
478
+
479
+ if __name__ == "__main__":
480
+ demo()
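The module docstring above describes the full workflow: build a lexicon with lexicon.fromstring, construct a CCGChartParser with chart.DefaultRuleSet, parse a tokenized sentence, and pretty-print the result with printCCGDerivation. The sketch below simply retraces that workflow with a trimmed copy of the demo lexicon defined at the end of this file; the example sentence is illustrative:

    from nltk.ccg import chart
    from nltk.ccg.lexicon import fromstring

    # Trimmed copy of the demo lexicon defined above in chart.py.
    lex = fromstring(
        """
    :- S, NP, N, VP        # S is the target primitive
    Det :: NP/N
    Pro :: NP
    TV :: VP/NP
    Modal :: (S\\NP)/VP    # Backslashes need to be escaped
    I => Pro
    will => Modal
    eat => TV
    the => Det
    bacon => N
    """
    )

    parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
    for parse in parser.parse("I will eat the bacon".split()):
        chart.printCCGDerivation(parse)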
llmeval-env/lib/python3.10/site-packages/nltk/ccg/combinator.py ADDED
@@ -0,0 +1,339 @@
1
+ # Natural Language Toolkit: Combinatory Categorial Grammar
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Graeme Gange <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ """
8
+ CCG Combinators
9
+ """
10
+
11
+ from abc import ABCMeta, abstractmethod
12
+
13
+ from nltk.ccg.api import FunctionalCategory
14
+
15
+
16
+ class UndirectedBinaryCombinator(metaclass=ABCMeta):
17
+ """
18
+ Abstract class for representing a binary combinator.
19
+ Merely defines functions for checking if the function and argument
20
+ are able to be combined, and what the resulting category is.
21
+
22
+ Note that as no assumptions are made as to direction, the unrestricted
23
+ combinators can perform all backward, forward and crossed variations
24
+ of the combinators; these restrictions must be added in the rule
25
+ class.
26
+ """
27
+
28
+ @abstractmethod
29
+ def can_combine(self, function, argument):
30
+ pass
31
+
32
+ @abstractmethod
33
+ def combine(self, function, argument):
34
+ pass
35
+
36
+
37
+ class DirectedBinaryCombinator(metaclass=ABCMeta):
38
+ """
39
+ Wrapper for the undirected binary combinator.
40
+ It takes left and right categories, and decides which is to be
41
+ the function, and which the argument.
42
+ It then decides whether or not they can be combined.
43
+ """
44
+
45
+ @abstractmethod
46
+ def can_combine(self, left, right):
47
+ pass
48
+
49
+ @abstractmethod
50
+ def combine(self, left, right):
51
+ pass
52
+
53
+
54
+ class ForwardCombinator(DirectedBinaryCombinator):
55
+ """
56
+ Class representing combinators where the primary functor is on the left.
57
+
58
+ Takes an undirected combinator, and a predicate which adds constraints
59
+ restricting the cases in which it may apply.
60
+ """
61
+
62
+ def __init__(self, combinator, predicate, suffix=""):
63
+ self._combinator = combinator
64
+ self._predicate = predicate
65
+ self._suffix = suffix
66
+
67
+ def can_combine(self, left, right):
68
+ return self._combinator.can_combine(left, right) and self._predicate(
69
+ left, right
70
+ )
71
+
72
+ def combine(self, left, right):
73
+ yield from self._combinator.combine(left, right)
74
+
75
+ def __str__(self):
76
+ return f">{self._combinator}{self._suffix}"
77
+
78
+
79
+ class BackwardCombinator(DirectedBinaryCombinator):
80
+ """
81
+ The backward equivalent of the ForwardCombinator class.
82
+ """
83
+
84
+ def __init__(self, combinator, predicate, suffix=""):
85
+ self._combinator = combinator
86
+ self._predicate = predicate
87
+ self._suffix = suffix
88
+
89
+ def can_combine(self, left, right):
90
+ return self._combinator.can_combine(right, left) and self._predicate(
91
+ left, right
92
+ )
93
+
94
+ def combine(self, left, right):
95
+ yield from self._combinator.combine(right, left)
96
+
97
+ def __str__(self):
98
+ return f"<{self._combinator}{self._suffix}"
99
+
100
+
101
+ class UndirectedFunctionApplication(UndirectedBinaryCombinator):
102
+ """
103
+ Class representing function application.
104
+ Implements rules of the form:
105
+ X/Y Y -> X (>)
106
+ And the corresponding backwards application rule
107
+ """
108
+
109
+ def can_combine(self, function, argument):
110
+ if not function.is_function():
111
+ return False
112
+
113
+ return not function.arg().can_unify(argument) is None
114
+
115
+ def combine(self, function, argument):
116
+ if not function.is_function():
117
+ return
118
+
119
+ subs = function.arg().can_unify(argument)
120
+ if subs is None:
121
+ return
122
+
123
+ yield function.res().substitute(subs)
124
+
125
+ def __str__(self):
126
+ return ""
127
+
128
+
129
+ # Predicates for function application.
130
+
131
+ # Ensures the left functor takes an argument on the right
132
+ def forwardOnly(left, right):
133
+ return left.dir().is_forward()
134
+
135
+
136
+ # Ensures the right functor takes an argument on the left
137
+ def backwardOnly(left, right):
138
+ return right.dir().is_backward()
139
+
140
+
141
+ # Application combinator instances
142
+ ForwardApplication = ForwardCombinator(UndirectedFunctionApplication(), forwardOnly)
143
+ BackwardApplication = BackwardCombinator(UndirectedFunctionApplication(), backwardOnly)
144
+
145
+
146
+ class UndirectedComposition(UndirectedBinaryCombinator):
147
+ """
148
+ Functional composition (harmonic) combinator.
149
+ Implements rules of the form
150
+ X/Y Y/Z -> X/Z (B>)
151
+ And the corresponding backwards and crossed variations.
152
+ """
153
+
154
+ def can_combine(self, function, argument):
155
+ # Can only combine two functions, and both functions must
156
+ # allow composition.
157
+ if not (function.is_function() and argument.is_function()):
158
+ return False
159
+ if function.dir().can_compose() and argument.dir().can_compose():
160
+ return not function.arg().can_unify(argument.res()) is None
161
+ return False
162
+
163
+ def combine(self, function, argument):
164
+ if not (function.is_function() and argument.is_function()):
165
+ return
166
+ if function.dir().can_compose() and argument.dir().can_compose():
167
+ subs = function.arg().can_unify(argument.res())
168
+ if subs is not None:
169
+ yield FunctionalCategory(
170
+ function.res().substitute(subs),
171
+ argument.arg().substitute(subs),
172
+ argument.dir(),
173
+ )
174
+
175
+ def __str__(self):
176
+ return "B"
177
+
178
+
179
+ # Predicates for restricting application of straight composition.
180
+ def bothForward(left, right):
181
+ return left.dir().is_forward() and right.dir().is_forward()
182
+
183
+
184
+ def bothBackward(left, right):
185
+ return left.dir().is_backward() and right.dir().is_backward()
186
+
187
+
188
+ # Predicates for crossed composition
189
+ def crossedDirs(left, right):
190
+ return left.dir().is_forward() and right.dir().is_backward()
191
+
192
+
193
+ def backwardBxConstraint(left, right):
194
+ # The functors must be crossed inwards
195
+ if not crossedDirs(left, right):
196
+ return False
197
+ # Permuting combinators must be allowed
198
+ if not left.dir().can_cross() and right.dir().can_cross():
199
+ return False
200
+ # The resulting argument category is restricted to be primitive
201
+ return left.arg().is_primitive()
202
+
203
+
204
+ # Straight composition combinators
205
+ ForwardComposition = ForwardCombinator(UndirectedComposition(), forwardOnly)
206
+ BackwardComposition = BackwardCombinator(UndirectedComposition(), backwardOnly)
207
+
208
+ # Backward crossed composition
209
+ BackwardBx = BackwardCombinator(
210
+ UndirectedComposition(), backwardBxConstraint, suffix="x"
211
+ )
212
+
213
+
214
+ class UndirectedSubstitution(UndirectedBinaryCombinator):
215
+ r"""
216
+ Substitution (permutation) combinator.
217
+ Implements rules of the form
218
+ Y/Z (X\Y)/Z -> X/Z (<Sx)
219
+ And other variations.
220
+ """
221
+
222
+ def can_combine(self, function, argument):
223
+ if function.is_primitive() or argument.is_primitive():
224
+ return False
225
+
226
+ # These could potentially be moved to the predicates, as the
227
+ # constraints may not be general to all languages.
228
+ if function.res().is_primitive():
229
+ return False
230
+ if not function.arg().is_primitive():
231
+ return False
232
+
233
+ if not (function.dir().can_compose() and argument.dir().can_compose()):
234
+ return False
235
+ return (function.res().arg() == argument.res()) and (
236
+ function.arg() == argument.arg()
237
+ )
238
+
239
+ def combine(self, function, argument):
240
+ if self.can_combine(function, argument):
241
+ yield FunctionalCategory(
242
+ function.res().res(), argument.arg(), argument.dir()
243
+ )
244
+
245
+ def __str__(self):
246
+ return "S"
247
+
248
+
249
+ # Predicate for forward substitution
250
+ def forwardSConstraint(left, right):
251
+ if not bothForward(left, right):
252
+ return False
253
+ return left.res().dir().is_forward() and left.arg().is_primitive()
254
+
255
+
256
+ # Predicate for backward crossed substitution
257
+ def backwardSxConstraint(left, right):
258
+ if not (left.dir().can_cross() and right.dir().can_cross()):
259
+ return False
260
+ if not bothForward(left, right):
261
+ return False
262
+ return right.res().dir().is_backward() and right.arg().is_primitive()
263
+
264
+
265
+ # Instances of substitution combinators
266
+ ForwardSubstitution = ForwardCombinator(UndirectedSubstitution(), forwardSConstraint)
267
+ BackwardSx = BackwardCombinator(UndirectedSubstitution(), backwardSxConstraint, "x")
268
+
269
+
270
+ # Retrieves the left-most functional category.
271
+ # ie, (N\N)/(S/NP) => N\N
272
+ def innermostFunction(categ):
273
+ while categ.res().is_function():
274
+ categ = categ.res()
275
+ return categ
276
+
277
+
278
+ class UndirectedTypeRaise(UndirectedBinaryCombinator):
279
+ """
280
+ Undirected combinator for type raising.
281
+ """
282
+
283
+ def can_combine(self, function, arg):
284
+ # The argument must be a function.
285
+ # The restriction that arg.res() must be a function
286
+ # merely reduces redundant type-raising; if arg.res() is
287
+ # primitive, we have:
288
+ # X Y\X =>(<T) Y/(Y\X) Y\X =>(>) Y
289
+ # which is equivalent to
290
+ # X Y\X =>(<) Y
291
+ if not (arg.is_function() and arg.res().is_function()):
292
+ return False
293
+
294
+ arg = innermostFunction(arg)
295
+
296
+ # The primary category must unify with the innermost functor's argument.
297
+ subs = function.can_unify(arg.arg())
298
+ if subs is not None:
299
+ return True
300
+ return False
301
+
302
+ def combine(self, function, arg):
303
+ if not (
304
+ function.is_primitive() and arg.is_function() and arg.res().is_function()
305
+ ):
306
+ return
307
+
308
+ # Type-raising matches only the innermost application.
309
+ arg = innermostFunction(arg)
310
+
311
+ subs = function.can_unify(arg.arg())
312
+ if subs is not None:
313
+ xcat = arg.res().substitute(subs)
314
+ yield FunctionalCategory(
315
+ xcat, FunctionalCategory(xcat, function, arg.dir()), -(arg.dir())
316
+ )
317
+
318
+ def __str__(self):
319
+ return "T"
320
+
321
+
322
+ # Predicates for type-raising
323
+ # The direction of the innermost category must be towards
324
+ # the primary functor.
325
+ # The restriction that the variable must be primitive is not
326
+ # common to all versions of CCGs; some authors have other restrictions.
327
+ def forwardTConstraint(left, right):
328
+ arg = innermostFunction(right)
329
+ return arg.dir().is_backward() and arg.res().is_primitive()
330
+
331
+
332
+ def backwardTConstraint(left, right):
333
+ arg = innermostFunction(left)
334
+ return arg.dir().is_forward() and arg.res().is_primitive()
335
+
336
+
337
+ # Instances of type-raising combinators
338
+ ForwardT = ForwardCombinator(UndirectedTypeRaise(), forwardTConstraint)
339
+ BackwardT = BackwardCombinator(UndirectedTypeRaise(), backwardTConstraint)
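The combinator instances defined above are normally exercised through ``nltk.ccg.chart.CCGChartParser`` rather than invoked directly. A minimal sketch, assuming the standard ``nltk.ccg`` API; the toy lexicon and sentence are illustrative only:

    from nltk.ccg import chart, lexicon

    # A tiny CCG lexicon in the notation accepted by lexicon.fromstring().
    lex = lexicon.fromstring(r"""
        :- S, NP
        I => NP
        apples => NP
        eat => (S\NP)/NP
        """)

    # DefaultRuleSet bundles the directed combinators defined in this module
    # (ForwardApplication, BackwardComposition, BackwardBx, ForwardT, ...).
    parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
    for parse in parser.parse("I eat apples".split()):
        chart.printCCGDerivation(parse)
        break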
llmeval-env/lib/python3.10/site-packages/nltk/ccg/logic.py ADDED
@@ -0,0 +1,60 @@
1
+ # Natural Language Toolkit: Combinatory Categorial Grammar
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Tanin Na Nakorn (@tanin)
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ """
8
+ Helper functions for CCG semantics computation
9
+ """
10
+
11
+ from nltk.sem.logic import *
12
+
13
+
14
+ def compute_type_raised_semantics(semantics):
15
+ core = semantics
16
+ parent = None
17
+ while isinstance(core, LambdaExpression):
18
+ parent = core
19
+ core = core.term
20
+
21
+ var = Variable("F")
22
+ while var in core.free():
23
+ var = unique_variable(pattern=var)
24
+ core = ApplicationExpression(FunctionVariableExpression(var), core)
25
+
26
+ if parent is not None:
27
+ parent.term = core
28
+ else:
29
+ semantics = core
30
+
31
+ return LambdaExpression(var, semantics)
32
+
33
+
34
+ def compute_function_semantics(function, argument):
35
+ return ApplicationExpression(function, argument).simplify()
36
+
37
+
38
+ def compute_composition_semantics(function, argument):
39
+ assert isinstance(argument, LambdaExpression), (
40
+ "`" + str(argument) + "` must be a lambda expression"
41
+ )
42
+ return LambdaExpression(
43
+ argument.variable, ApplicationExpression(function, argument.term).simplify()
44
+ )
45
+
46
+
47
+ def compute_substitution_semantics(function, argument):
48
+ assert isinstance(function, LambdaExpression) and isinstance(
49
+ function.term, LambdaExpression
50
+ ), ("`" + str(function) + "` must be a lambda expression with 2 arguments")
51
+ assert isinstance(argument, LambdaExpression), (
52
+ "`" + str(argument) + "` must be a lambda expression"
53
+ )
54
+
55
+ new_argument = ApplicationExpression(
56
+ argument, VariableExpression(function.variable)
57
+ ).simplify()
58
+ new_term = ApplicationExpression(function.term, new_argument).simplify()
59
+
60
+ return LambdaExpression(function.variable, new_term)
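A quick sketch of how these helpers behave, using ``Expression.fromstring`` from ``nltk.sem.logic``; the lambda terms are illustrative:

    from nltk.ccg.logic import (
        compute_composition_semantics,
        compute_function_semantics,
        compute_type_raised_semantics,
    )
    from nltk.sem.logic import Expression

    read = Expression.fromstring
    f = read(r"\x.eat(john, x)")   # semantics of a functor category
    g = read(r"\y.cooked(y)")      # semantics of another functor
    a = read("apples")             # semantics of an argument

    print(compute_function_semantics(f, a))      # roughly: eat(john,apples)
    print(compute_composition_semantics(f, g))   # roughly: \y.eat(john,cooked(y))
    print(compute_type_raised_semantics(a))      # roughly: \F.F(apples)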
llmeval-env/lib/python3.10/site-packages/nltk/chunk/__init__.py ADDED
@@ -0,0 +1,197 @@
1
+ # Natural Language Toolkit: Chunkers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ #
9
+
10
+ """
11
+ Classes and interfaces for identifying non-overlapping linguistic
12
+ groups (such as base noun phrases) in unrestricted text. This task is
13
+ called "chunk parsing" or "chunking", and the identified groups are
14
+ called "chunks". The chunked text is represented using a shallow
15
+ tree called a "chunk structure." A chunk structure is a tree
16
+ containing tokens and chunks, where each chunk is a subtree containing
17
+ only tokens. For example, the chunk structure for base noun phrase
18
+ chunks in the sentence "I saw the big dog on the hill" is::
19
+
20
+ (SENTENCE:
21
+ (NP: <I>)
22
+ <saw>
23
+ (NP: <the> <big> <dog>)
24
+ <on>
25
+ (NP: <the> <hill>))
26
+
27
+ To convert a chunk structure back to a list of tokens, simply use the
28
+ chunk structure's ``leaves()`` method.
29
+
30
+ This module defines ``ChunkParserI``, a standard interface for
31
+ chunking texts; and ``RegexpChunkParser``, a regular-expression based
32
+ implementation of that interface. It also defines ``ChunkScore``, a
33
+ utility class for scoring chunk parsers.
34
+
35
+ RegexpChunkParser
36
+ =================
37
+
38
+ ``RegexpChunkParser`` is an implementation of the chunk parser interface
39
+ that uses regular-expressions over tags to chunk a text. Its
40
+ ``parse()`` method first constructs a ``ChunkString``, which encodes a
41
+ particular chunking of the input text. Initially, nothing is
42
+ chunked. ``parse.RegexpChunkParser`` then applies a sequence of
43
+ ``RegexpChunkRule`` rules to the ``ChunkString``, each of which modifies
44
+ the chunking that it encodes. Finally, the ``ChunkString`` is
45
+ transformed back into a chunk structure, which is returned.
46
+
47
+ ``RegexpChunkParser`` can only be used to chunk a single kind of phrase.
48
+ For example, you can use an ``RegexpChunkParser`` to chunk the noun
49
+ phrases in a text, or the verb phrases in a text; but you can not
50
+ use it to simultaneously chunk both noun phrases and verb phrases in
51
+ the same text. (This is a limitation of ``RegexpChunkParser``, not of
52
+ chunk parsers in general.)
53
+
54
+ RegexpChunkRules
55
+ ----------------
56
+
57
+ A ``RegexpChunkRule`` is a transformational rule that updates the
58
+ chunking of a text by modifying its ``ChunkString``. Each
59
+ ``RegexpChunkRule`` defines the ``apply()`` method, which modifies
60
+ the chunking encoded by a ``ChunkString``. The
61
+ ``RegexpChunkRule`` class itself can be used to implement any
62
+ transformational rule based on regular expressions. There are
63
+ also a number of subclasses, which can be used to implement
64
+ simpler types of rules:
65
+
66
+ - ``ChunkRule`` chunks anything that matches a given regular
67
+ expression.
68
+ - ``StripRule`` strips anything that matches a given regular
69
+ expression.
70
+ - ``UnChunkRule`` will un-chunk any chunk that matches a given
71
+ regular expression.
72
+ - ``MergeRule`` can be used to merge two contiguous chunks.
73
+ - ``SplitRule`` can be used to split a single chunk into two
74
+ smaller chunks.
75
+ - ``ExpandLeftRule`` will expand a chunk to incorporate new
76
+ unchunked material on the left.
77
+ - ``ExpandRightRule`` will expand a chunk to incorporate new
78
+ unchunked material on the right.
79
+
80
+ Tag Patterns
81
+ ~~~~~~~~~~~~
82
+
83
+ A ``RegexpChunkRule`` uses a modified version of regular
84
+ expression patterns, called "tag patterns". Tag patterns are
85
+ used to match sequences of tags. Examples of tag patterns are::
86
+
87
+ r'(<DT>|<JJ>|<NN>)+'
88
+ r'<NN>+'
89
+ r'<NN.*>'
90
+
91
+ The differences between regular expression patterns and tag
92
+ patterns are:
93
+
94
+ - In tag patterns, ``'<'`` and ``'>'`` act as parentheses; so
95
+ ``'<NN>+'`` matches one or more repetitions of ``'<NN>'``, not
96
+ ``'<NN'`` followed by one or more repetitions of ``'>'``.
97
+ - Whitespace in tag patterns is ignored. So
98
+ ``'<DT> | <NN>'`` is equivalent to ``'<DT>|<NN>'``
99
+ - In tag patterns, ``'.'`` is equivalent to ``'[^{}<>]'``; so
100
+ ``'<NN.*>'`` matches any single tag starting with ``'NN'``.
101
+
102
+ The function ``tag_pattern2re_pattern`` can be used to transform
103
+ a tag pattern to an equivalent regular expression pattern.
104
+
105
+ Efficiency
106
+ ----------
107
+
108
+ Preliminary tests indicate that ``RegexpChunkParser`` can chunk at a
109
+ rate of about 300 tokens/second, with a moderately complex rule set.
110
+
111
+ There may be problems if ``RegexpChunkParser`` is used with more than
112
+ 5,000 tokens at a time. In particular, evaluation of some regular
113
+ expressions may cause the Python regular expression engine to
114
+ exceed its maximum recursion depth. We have attempted to minimize
115
+ these problems, but it is impossible to avoid them completely. We
116
+ therefore recommend that you apply the chunk parser to a single
117
+ sentence at a time.
118
+
119
+ Emacs Tip
120
+ ---------
121
+
122
+ If you evaluate the following elisp expression in emacs, it will
123
+ colorize a ``ChunkString`` when you use an interactive python shell
124
+ with emacs or xemacs ("C-c !")::
125
+
126
+ (let ()
127
+ (defconst comint-mode-font-lock-keywords
128
+ '(("<[^>]+>" 0 'font-lock-reference-face)
129
+ ("[{}]" 0 'font-lock-function-name-face)))
130
+ (add-hook 'comint-mode-hook (lambda () (turn-on-font-lock))))
131
+
132
+ You can evaluate this code by copying it to a temporary buffer,
133
+ placing the cursor after the last close parenthesis, and typing
134
+ "``C-x C-e``". You should evaluate it before running the interactive
135
+ session. The change will last until you close emacs.
136
+
137
+ Unresolved Issues
138
+ -----------------
139
+
140
+ If we use the ``re`` module for regular expressions, Python's
141
+ regular expression engine generates "maximum recursion depth
142
+ exceeded" errors when processing very large texts, even for
143
+ regular expressions that should not require any recursion. We
144
+ therefore use the ``pre`` module instead. But note that ``pre``
145
+ does not include Unicode support, so this module will not work
146
+ with unicode strings. Note also that ``pre`` regular expressions
147
+ are not quite as advanced as ``re`` ones (e.g., no leftward
148
+ zero-length assertions).
149
+
150
+ :type CHUNK_TAG_PATTERN: regexp
151
+ :var CHUNK_TAG_PATTERN: A regular expression to test whether a tag
152
+ pattern is valid.
153
+ """
154
+
155
+ from nltk.chunk.api import ChunkParserI
156
+ from nltk.chunk.regexp import RegexpChunkParser, RegexpParser
157
+ from nltk.chunk.util import (
158
+ ChunkScore,
159
+ accuracy,
160
+ conllstr2tree,
161
+ conlltags2tree,
162
+ ieerstr2tree,
163
+ tagstr2tree,
164
+ tree2conllstr,
165
+ tree2conlltags,
166
+ )
167
+ from nltk.data import load
168
+
169
+ # Standard treebank POS tagger
170
+ _BINARY_NE_CHUNKER = "chunkers/maxent_ne_chunker/english_ace_binary.pickle"
171
+ _MULTICLASS_NE_CHUNKER = "chunkers/maxent_ne_chunker/english_ace_multiclass.pickle"
172
+
173
+
174
+ def ne_chunk(tagged_tokens, binary=False):
175
+ """
176
+ Use NLTK's currently recommended named entity chunker to
177
+ chunk the given list of tagged tokens.
178
+ """
179
+ if binary:
180
+ chunker_pickle = _BINARY_NE_CHUNKER
181
+ else:
182
+ chunker_pickle = _MULTICLASS_NE_CHUNKER
183
+ chunker = load(chunker_pickle)
184
+ return chunker.parse(tagged_tokens)
185
+
186
+
187
+ def ne_chunk_sents(tagged_sentences, binary=False):
188
+ """
189
+ Use NLTK's currently recommended named entity chunker to chunk the
190
+ given list of tagged sentences, each consisting of a list of tagged tokens.
191
+ """
192
+ if binary:
193
+ chunker_pickle = _BINARY_NE_CHUNKER
194
+ else:
195
+ chunker_pickle = _MULTICLASS_NE_CHUNKER
196
+ chunker = load(chunker_pickle)
197
+ return chunker.parse_sents(tagged_sentences)
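A short usage sketch tying this package together; the grammar and sentences are illustrative, and ``ne_chunk`` assumes the ``maxent_ne_chunker`` and ``words`` resources (plus a tokenizer and POS tagger) have been fetched with ``nltk.download()``:

    from nltk import RegexpParser, pos_tag, word_tokenize
    from nltk.chunk import ne_chunk

    tagged = pos_tag(word_tokenize("I saw the big dog on the hill"))

    # Rule-based NP chunking with a tag-pattern grammar (see "Tag Patterns" above).
    cp = RegexpParser("NP: {<DT>?<JJ>*<NN>}")
    print(cp.parse(tagged))

    # Classifier-based named-entity chunking using the pickled models above.
    print(ne_chunk(pos_tag(word_tokenize("Mark works at Google in London"))))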
llmeval-env/lib/python3.10/site-packages/nltk/chunk/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (7.27 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/chunk/__pycache__/api.cpython-310.pyc ADDED
Binary file (1.98 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/chunk/__pycache__/named_entity.cpython-310.pyc ADDED
Binary file (9.47 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/chunk/__pycache__/regexp.cpython-310.pyc ADDED
Binary file (47.3 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/chunk/__pycache__/util.cpython-310.pyc ADDED
Binary file (17.7 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/chunk/api.py ADDED
@@ -0,0 +1,56 @@
1
+ # Natural Language Toolkit: Chunk parsing API
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]> (minor additions)
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ ##//////////////////////////////////////////////////////
10
+ ## Chunk Parser Interface
11
+ ##//////////////////////////////////////////////////////
12
+
13
+ from nltk.chunk.util import ChunkScore
14
+ from nltk.internals import deprecated
15
+ from nltk.parse import ParserI
16
+
17
+
18
+ class ChunkParserI(ParserI):
19
+ """
20
+ A processing interface for identifying non-overlapping groups in
21
+ unrestricted text. Typically, chunk parsers are used to find base
22
+ syntactic constituents, such as base noun phrases. Unlike
23
+ ``ParserI``, ``ChunkParserI`` guarantees that the ``parse()`` method
24
+ will always generate a parse.
25
+ """
26
+
27
+ def parse(self, tokens):
28
+ """
29
+ Return the best chunk structure for the given tokens,
30
+ as a tree.
31
+
32
+ :param tokens: The list of (word, tag) tokens to be chunked.
33
+ :type tokens: list(tuple)
34
+ :rtype: Tree
35
+ """
36
+ raise NotImplementedError()
37
+
38
+ @deprecated("Use accuracy(gold) instead.")
39
+ def evaluate(self, gold):
40
+ return self.accuracy(gold)
41
+
42
+ def accuracy(self, gold):
43
+ """
44
+ Score the accuracy of the chunker against the gold standard.
45
+ Remove the chunking the gold standard text, rechunk it using
46
+ the chunker, and return a ``ChunkScore`` object
47
+ reflecting the performance of this chunk parser.
48
+
49
+ :type gold: list(Tree)
50
+ :param gold: The list of chunked sentences to score the chunker on.
51
+ :rtype: ChunkScore
52
+ """
53
+ chunkscore = ChunkScore()
54
+ for correct in gold:
55
+ chunkscore.score(correct, self.parse(correct.leaves()))
56
+ return chunkscore
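A minimal sketch of implementing this interface, along the lines of the unigram chunker from the NLTK book; it assumes the CoNLL-2000 corpus and the IOB helpers in ``nltk.chunk.util``, and is illustrative rather than part of this commit:

    import nltk
    from nltk.chunk import ChunkParserI
    from nltk.chunk.util import conlltags2tree, tree2conlltags

    class UnigramChunker(ChunkParserI):
        """Assign each POS tag its most frequent IOB chunk tag."""

        def __init__(self, train_sents):
            # Train a unigram tagger over (pos, iob) pairs read off the gold trees.
            train_data = [
                [(pos, iob) for (word, pos, iob) in tree2conlltags(sent)]
                for sent in train_sents
            ]
            self._tagger = nltk.UnigramTagger(train_data)

        def parse(self, tagged_tokens):
            pos_tags = [pos for (word, pos) in tagged_tokens]
            iob_tags = [iob for (pos, iob) in self._tagger.tag(pos_tags)]
            conlltags = [
                (word, pos, iob)
                for ((word, pos), iob) in zip(tagged_tokens, iob_tags)
            ]
            return conlltags2tree(conlltags)

    # train = nltk.corpus.conll2000.chunked_sents("train.txt", chunk_types=["NP"])
    # print(UnigramChunker(train).accuracy(
    #     nltk.corpus.conll2000.chunked_sents("test.txt", chunk_types=["NP"])))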
llmeval-env/lib/python3.10/site-packages/nltk/chunk/named_entity.py ADDED
@@ -0,0 +1,352 @@
1
+ # Natural Language Toolkit: Chunk parsing API
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Named entity chunker
10
+ """
11
+
12
+ import os
13
+ import pickle
14
+ import re
15
+ from xml.etree import ElementTree as ET
16
+
17
+ from nltk.tag import ClassifierBasedTagger, pos_tag
18
+
19
+ try:
20
+ from nltk.classify import MaxentClassifier
21
+ except ImportError:
22
+ pass
23
+
24
+ from nltk.chunk.api import ChunkParserI
25
+ from nltk.chunk.util import ChunkScore
26
+ from nltk.data import find
27
+ from nltk.tokenize import word_tokenize
28
+ from nltk.tree import Tree
29
+
30
+
31
+ class NEChunkParserTagger(ClassifierBasedTagger):
32
+ """
33
+ The IOB tagger used by the chunk parser.
34
+ """
35
+
36
+ def __init__(self, train):
37
+ ClassifierBasedTagger.__init__(
38
+ self, train=train, classifier_builder=self._classifier_builder
39
+ )
40
+
41
+ def _classifier_builder(self, train):
42
+ return MaxentClassifier.train(
43
+ train, algorithm="megam", gaussian_prior_sigma=1, trace=2
44
+ )
45
+
46
+ def _english_wordlist(self):
47
+ try:
48
+ wl = self._en_wordlist
49
+ except AttributeError:
50
+ from nltk.corpus import words
51
+
52
+ self._en_wordlist = set(words.words("en-basic"))
53
+ wl = self._en_wordlist
54
+ return wl
55
+
56
+ def _feature_detector(self, tokens, index, history):
57
+ word = tokens[index][0]
58
+ pos = simplify_pos(tokens[index][1])
59
+ if index == 0:
60
+ prevword = prevprevword = None
61
+ prevpos = prevprevpos = None
62
+ prevshape = prevtag = prevprevtag = None
63
+ elif index == 1:
64
+ prevword = tokens[index - 1][0].lower()
65
+ prevprevword = None
66
+ prevpos = simplify_pos(tokens[index - 1][1])
67
+ prevprevpos = None
68
+ prevtag = history[index - 1][0]
69
+ prevshape = prevprevtag = None
70
+ else:
71
+ prevword = tokens[index - 1][0].lower()
72
+ prevprevword = tokens[index - 2][0].lower()
73
+ prevpos = simplify_pos(tokens[index - 1][1])
74
+ prevprevpos = simplify_pos(tokens[index - 2][1])
75
+ prevtag = history[index - 1]
76
+ prevprevtag = history[index - 2]
77
+ prevshape = shape(prevword)
78
+ if index == len(tokens) - 1:
79
+ nextword = nextnextword = None
80
+ nextpos = nextnextpos = None
81
+ elif index == len(tokens) - 2:
82
+ nextword = tokens[index + 1][0].lower()
83
+ nextpos = tokens[index + 1][1].lower()
84
+ nextnextword = None
85
+ nextnextpos = None
86
+ else:
87
+ nextword = tokens[index + 1][0].lower()
88
+ nextpos = tokens[index + 1][1].lower()
89
+ nextnextword = tokens[index + 2][0].lower()
90
+ nextnextpos = tokens[index + 2][1].lower()
91
+
92
+ # 89.6
93
+ features = {
94
+ "bias": True,
95
+ "shape": shape(word),
96
+ "wordlen": len(word),
97
+ "prefix3": word[:3].lower(),
98
+ "suffix3": word[-3:].lower(),
99
+ "pos": pos,
100
+ "word": word,
101
+ "en-wordlist": (word in self._english_wordlist()),
102
+ "prevtag": prevtag,
103
+ "prevpos": prevpos,
104
+ "nextpos": nextpos,
105
+ "prevword": prevword,
106
+ "nextword": nextword,
107
+ "word+nextpos": f"{word.lower()}+{nextpos}",
108
+ "pos+prevtag": f"{pos}+{prevtag}",
109
+ "shape+prevtag": f"{prevshape}+{prevtag}",
110
+ }
111
+
112
+ return features
113
+
114
+
115
+ class NEChunkParser(ChunkParserI):
116
+ """
117
+ Expected input: list of pos-tagged words
118
+ """
119
+
120
+ def __init__(self, train):
121
+ self._train(train)
122
+
123
+ def parse(self, tokens):
124
+ """
125
+ Each token should be a pos-tagged word
126
+ """
127
+ tagged = self._tagger.tag(tokens)
128
+ tree = self._tagged_to_parse(tagged)
129
+ return tree
130
+
131
+ def _train(self, corpus):
132
+ # Convert to tagged sequence
133
+ corpus = [self._parse_to_tagged(s) for s in corpus]
134
+
135
+ self._tagger = NEChunkParserTagger(train=corpus)
136
+
137
+ def _tagged_to_parse(self, tagged_tokens):
138
+ """
139
+ Convert a list of tagged tokens to a chunk-parse tree.
140
+ """
141
+ sent = Tree("S", [])
142
+
143
+ for (tok, tag) in tagged_tokens:
144
+ if tag == "O":
145
+ sent.append(tok)
146
+ elif tag.startswith("B-"):
147
+ sent.append(Tree(tag[2:], [tok]))
148
+ elif tag.startswith("I-"):
149
+ if sent and isinstance(sent[-1], Tree) and sent[-1].label() == tag[2:]:
150
+ sent[-1].append(tok)
151
+ else:
152
+ sent.append(Tree(tag[2:], [tok]))
153
+ return sent
154
+
155
+ @staticmethod
156
+ def _parse_to_tagged(sent):
157
+ """
158
+ Convert a chunk-parse tree to a list of tagged tokens.
159
+ """
160
+ toks = []
161
+ for child in sent:
162
+ if isinstance(child, Tree):
163
+ if len(child) == 0:
164
+ print("Warning -- empty chunk in sentence")
165
+ continue
166
+ toks.append((child[0], f"B-{child.label()}"))
167
+ for tok in child[1:]:
168
+ toks.append((tok, f"I-{child.label()}"))
169
+ else:
170
+ toks.append((child, "O"))
171
+ return toks
172
+
173
+
174
+ def shape(word):
175
+ if re.match(r"[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$", word, re.UNICODE):
176
+ return "number"
177
+ elif re.match(r"\W+$", word, re.UNICODE):
178
+ return "punct"
179
+ elif re.match(r"\w+$", word, re.UNICODE):
180
+ if word.istitle():
181
+ return "upcase"
182
+ elif word.islower():
183
+ return "downcase"
184
+ else:
185
+ return "mixedcase"
186
+ else:
187
+ return "other"
188
+
189
+
190
+ def simplify_pos(s):
191
+ if s.startswith("V"):
192
+ return "V"
193
+ else:
194
+ return s.split("-")[0]
195
+
196
+
197
+ def postag_tree(tree):
198
+ # Part-of-speech tagging.
199
+ words = tree.leaves()
200
+ tag_iter = (pos for (word, pos) in pos_tag(words))
201
+ newtree = Tree("S", [])
202
+ for child in tree:
203
+ if isinstance(child, Tree):
204
+ newtree.append(Tree(child.label(), []))
205
+ for subchild in child:
206
+ newtree[-1].append((subchild, next(tag_iter)))
207
+ else:
208
+ newtree.append((child, next(tag_iter)))
209
+ return newtree
210
+
211
+
212
+ def load_ace_data(roots, fmt="binary", skip_bnews=True):
213
+ for root in roots:
214
+ for root, dirs, files in os.walk(root):
215
+ if root.endswith("bnews") and skip_bnews:
216
+ continue
217
+ for f in files:
218
+ if f.endswith(".sgm"):
219
+ yield from load_ace_file(os.path.join(root, f), fmt)
220
+
221
+
222
+ def load_ace_file(textfile, fmt):
223
+ print(f" - {os.path.split(textfile)[1]}")
224
+ annfile = textfile + ".tmx.rdc.xml"
225
+
226
+ # Read the xml file, and get a list of entities
227
+ entities = []
228
+ with open(annfile) as infile:
229
+ xml = ET.parse(infile).getroot()
230
+ for entity in xml.findall("document/entity"):
231
+ typ = entity.find("entity_type").text
232
+ for mention in entity.findall("entity_mention"):
233
+ if mention.get("TYPE") != "NAME":
234
+ continue # only NEs
235
+ s = int(mention.find("head/charseq/start").text)
236
+ e = int(mention.find("head/charseq/end").text) + 1
237
+ entities.append((s, e, typ))
238
+
239
+ # Read the text file, and mark the entities.
240
+ with open(textfile) as infile:
241
+ text = infile.read()
242
+
243
+ # Strip XML tags, since they don't count towards the indices
244
+ text = re.sub("<(?!/?TEXT)[^>]+>", "", text)
245
+
246
+ # Blank out anything before/after <TEXT>
247
+ def subfunc(m):
248
+ return " " * (m.end() - m.start() - 6)
249
+
250
+ text = re.sub(r"[\s\S]*<TEXT>", subfunc, text)
251
+ text = re.sub(r"</TEXT>[\s\S]*", "", text)
252
+
253
+ # Simplify quotes
254
+ text = re.sub("``", ' "', text)
255
+ text = re.sub("''", '" ', text)
256
+
257
+ entity_types = {typ for (s, e, typ) in entities}
258
+
259
+ # Binary distinction (NE or not NE)
260
+ if fmt == "binary":
261
+ i = 0
262
+ toks = Tree("S", [])
263
+ for (s, e, typ) in sorted(entities):
264
+ if s < i:
265
+ s = i # Overlapping! Deal with this better?
266
+ if e <= s:
267
+ continue
268
+ toks.extend(word_tokenize(text[i:s]))
269
+ toks.append(Tree("NE", text[s:e].split()))
270
+ i = e
271
+ toks.extend(word_tokenize(text[i:]))
272
+ yield toks
273
+
274
+ # Multiclass distinction (NE type)
275
+ elif fmt == "multiclass":
276
+ i = 0
277
+ toks = Tree("S", [])
278
+ for (s, e, typ) in sorted(entities):
279
+ if s < i:
280
+ s = i # Overlapping! Deal with this better?
281
+ if e <= s:
282
+ continue
283
+ toks.extend(word_tokenize(text[i:s]))
284
+ toks.append(Tree(typ, text[s:e].split()))
285
+ i = e
286
+ toks.extend(word_tokenize(text[i:]))
287
+ yield toks
288
+
289
+ else:
290
+ raise ValueError("bad fmt value")
291
+
292
+
293
+ # This probably belongs in a more general-purpose location (as does
294
+ # the parse_to_tagged function).
295
+ def cmp_chunks(correct, guessed):
296
+ correct = NEChunkParser._parse_to_tagged(correct)
297
+ guessed = NEChunkParser._parse_to_tagged(guessed)
298
+ ellipsis = False
299
+ for (w, ct), (w, gt) in zip(correct, guessed):
300
+ if ct == gt == "O":
301
+ if not ellipsis:
302
+ print(f" {ct:15} {gt:15} {w}")
303
+ print(" {:15} {:15} {2}".format("...", "...", "..."))
304
+ ellipsis = True
305
+ else:
306
+ ellipsis = False
307
+ print(f" {ct:15} {gt:15} {w}")
308
+
309
+
310
+ def build_model(fmt="binary"):
311
+ print("Loading training data...")
312
+ train_paths = [
313
+ find("corpora/ace_data/ace.dev"),
314
+ find("corpora/ace_data/ace.heldout"),
315
+ find("corpora/ace_data/bbn.dev"),
316
+ find("corpora/ace_data/muc.dev"),
317
+ ]
318
+ train_trees = load_ace_data(train_paths, fmt)
319
+ train_data = [postag_tree(t) for t in train_trees]
320
+ print("Training...")
321
+ cp = NEChunkParser(train_data)
322
+ del train_data
323
+
324
+ print("Loading eval data...")
325
+ eval_paths = [find("corpora/ace_data/ace.eval")]
326
+ eval_trees = load_ace_data(eval_paths, fmt)
327
+ eval_data = [postag_tree(t) for t in eval_trees]
328
+
329
+ print("Evaluating...")
330
+ chunkscore = ChunkScore()
331
+ for i, correct in enumerate(eval_data):
332
+ guess = cp.parse(correct.leaves())
333
+ chunkscore.score(correct, guess)
334
+ if i < 3:
335
+ cmp_chunks(correct, guess)
336
+ print(chunkscore)
337
+
338
+ outfilename = f"/tmp/ne_chunker_{fmt}.pickle"
339
+ print(f"Saving chunker to {outfilename}...")
340
+
341
+ with open(outfilename, "wb") as outfile:
342
+ pickle.dump(cp, outfile, -1)
343
+
344
+ return cp
345
+
346
+
347
+ if __name__ == "__main__":
348
+ # Make sure that the pickled object has the right class name:
349
+ from nltk.chunk.named_entity import build_model
350
+
351
+ build_model("binary")
352
+ build_model("multiclass")
llmeval-env/lib/python3.10/site-packages/nltk/chunk/regexp.py ADDED
@@ -0,0 +1,1475 @@
1
+ # Natural Language Toolkit: Regular Expression Chunkers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]> (minor additions)
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import re
10
+
11
+ import regex
12
+
13
+ from nltk.chunk.api import ChunkParserI
14
+ from nltk.tree import Tree
15
+
16
+ # //////////////////////////////////////////////////////
17
+ # ChunkString
18
+ # //////////////////////////////////////////////////////
19
+
20
+
21
+ class ChunkString:
22
+ """
23
+ A string-based encoding of a particular chunking of a text.
24
+ Internally, the ``ChunkString`` class uses a single string to
25
+ encode the chunking of the input text. This string contains a
26
+ sequence of angle-bracket delimited tags, with chunking indicated
27
+ by braces. An example of this encoding is::
28
+
29
+ {<DT><JJ><NN>}<VBN><IN>{<DT><NN>}<.>{<DT><NN>}<VBD><.>
30
+
31
+ A ``ChunkString`` is created from a tagged text (i.e., a list of
32
+ ``tokens`` whose type is ``TaggedType``). Initially, nothing is
33
+ chunked.
34
+
35
+ The chunking of a ``ChunkString`` can be modified with the ``xform()``
36
+ method, which uses a regular expression to transform the string
37
+ representation. These transformations should only add and remove
38
+ braces; they should *not* modify the sequence of angle-bracket
39
+ delimited tags.
40
+
41
+ :type _str: str
42
+ :ivar _str: The internal string representation of the text's
43
+ encoding. This string representation contains a sequence of
44
+ angle-bracket delimited tags, with chunking indicated by
45
+ braces. An example of this encoding is::
46
+
47
+ {<DT><JJ><NN>}<VBN><IN>{<DT><NN>}<.>{<DT><NN>}<VBD><.>
48
+
49
+ :type _pieces: list(tagged tokens and chunks)
50
+ :ivar _pieces: The tagged tokens and chunks encoded by this ``ChunkString``.
51
+ :ivar _debug: The debug level. See the constructor docs.
52
+
53
+ :cvar IN_CHUNK_PATTERN: A zero-width regexp pattern string that
54
+ will only match positions that are in chunks.
55
+ :cvar IN_STRIP_PATTERN: A zero-width regexp pattern string that
56
+ will only match positions that are in strips.
57
+ """
58
+
59
+ CHUNK_TAG_CHAR = r"[^\{\}<>]"
60
+ CHUNK_TAG = r"(<%s+?>)" % CHUNK_TAG_CHAR
61
+
62
+ IN_CHUNK_PATTERN = r"(?=[^\{]*\})"
63
+ IN_STRIP_PATTERN = r"(?=[^\}]*(\{|$))"
64
+
65
+ # These are used by _verify
66
+ _CHUNK = r"(\{%s+?\})+?" % CHUNK_TAG
67
+ _STRIP = r"(%s+?)+?" % CHUNK_TAG
68
+ _VALID = re.compile(r"^(\{?%s\}?)*?$" % CHUNK_TAG)
69
+ _BRACKETS = re.compile(r"[^\{\}]+")
70
+ _BALANCED_BRACKETS = re.compile(r"(\{\})*$")
71
+
72
+ def __init__(self, chunk_struct, debug_level=1):
73
+ """
74
+ Construct a new ``ChunkString`` that encodes the chunking of
75
+ the text ``tagged_tokens``.
76
+
77
+ :type chunk_struct: Tree
78
+ :param chunk_struct: The chunk structure to be further chunked.
79
+ :type debug_level: int
80
+ :param debug_level: The level of debugging which should be
81
+ applied to transformations on the ``ChunkString``. The
82
+ valid levels are:
83
+
84
+ - 0: no checks
85
+ - 1: full check on to_chunkstruct
86
+ - 2: full check on to_chunkstruct and cursory check after
87
+ each transformation.
88
+ - 3: full check on to_chunkstruct and full check after
89
+ each transformation.
90
+
91
+ We recommend you use at least level 1. You should
92
+ probably use level 3 if you use any non-standard
93
+ subclasses of ``RegexpChunkRule``.
94
+ """
95
+ self._root_label = chunk_struct.label()
96
+ self._pieces = chunk_struct[:]
97
+ tags = [self._tag(tok) for tok in self._pieces]
98
+ self._str = "<" + "><".join(tags) + ">"
99
+ self._debug = debug_level
100
+
101
+ def _tag(self, tok):
102
+ if isinstance(tok, tuple):
103
+ return tok[1]
104
+ elif isinstance(tok, Tree):
105
+ return tok.label()
106
+ else:
107
+ raise ValueError("chunk structures must contain tagged " "tokens or trees")
108
+
109
+ def _verify(self, s, verify_tags):
110
+ """
111
+ Check to make sure that ``s`` still corresponds to some chunked
112
+ version of ``_pieces``.
113
+
114
+ :type verify_tags: bool
115
+ :param verify_tags: Whether the individual tags should be
116
+ checked. If this is false, ``_verify`` will check to make
117
+ sure that ``_str`` encodes a chunked version of *some*
118
+ list of tokens. If this is true, then ``_verify`` will
119
+ check to make sure that the tags in ``_str`` match those in
120
+ ``_pieces``.
121
+
122
+ :raise ValueError: if the internal string representation of
123
+ this ``ChunkString`` is invalid or not consistent with _pieces.
124
+ """
125
+ # Check overall form
126
+ if not ChunkString._VALID.match(s):
127
+ raise ValueError(
128
+ "Transformation generated invalid " "chunkstring:\n %s" % s
129
+ )
130
+
131
+ # Check that parens are balanced. If the string is long, we
132
+ # have to do this in pieces, to avoid a maximum recursion
133
+ # depth limit for regular expressions.
134
+ brackets = ChunkString._BRACKETS.sub("", s)
135
+ for i in range(1 + len(brackets) // 5000):
136
+ substr = brackets[i * 5000 : i * 5000 + 5000]
137
+ if not ChunkString._BALANCED_BRACKETS.match(substr):
138
+ raise ValueError(
139
+ "Transformation generated invalid " "chunkstring:\n %s" % s
140
+ )
141
+
142
+ if verify_tags <= 0:
143
+ return
144
+
145
+ tags1 = (re.split(r"[\{\}<>]+", s))[1:-1]
146
+ tags2 = [self._tag(piece) for piece in self._pieces]
147
+ if tags1 != tags2:
148
+ raise ValueError(
149
+ "Transformation generated invalid " "chunkstring: tag changed"
150
+ )
151
+
152
+ def to_chunkstruct(self, chunk_label="CHUNK"):
153
+ """
154
+ Return the chunk structure encoded by this ``ChunkString``.
155
+
156
+ :rtype: Tree
157
+ :raise ValueError: If a transformation has generated an
158
+ invalid chunkstring.
159
+ """
160
+ if self._debug > 0:
161
+ self._verify(self._str, 1)
162
+
163
+ # Use this alternating list to create the chunkstruct.
164
+ pieces = []
165
+ index = 0
166
+ piece_in_chunk = 0
167
+ for piece in re.split("[{}]", self._str):
168
+
169
+ # Find the list of tokens contained in this piece.
170
+ length = piece.count("<")
171
+ subsequence = self._pieces[index : index + length]
172
+
173
+ # Add this list of tokens to our pieces.
174
+ if piece_in_chunk:
175
+ pieces.append(Tree(chunk_label, subsequence))
176
+ else:
177
+ pieces += subsequence
178
+
179
+ # Update index, piece_in_chunk
180
+ index += length
181
+ piece_in_chunk = not piece_in_chunk
182
+
183
+ return Tree(self._root_label, pieces)
184
+
185
+ def xform(self, regexp, repl):
186
+ """
187
+ Apply the given transformation to the string encoding of this
188
+ ``ChunkString``. In particular, find all occurrences that match
189
+ ``regexp``, and replace them using ``repl`` (as done by
190
+ ``re.sub``).
191
+
192
+ This transformation should only add and remove braces; it
193
+ should *not* modify the sequence of angle-bracket delimited
194
+ tags. Furthermore, this transformation may not result in
195
+ improper bracketing. Note, in particular, that bracketing may
196
+ not be nested.
197
+
198
+ :type regexp: str or regexp
199
+ :param regexp: A regular expression matching the substring
200
+ that should be replaced. This will typically include a
201
+ named group, which can be used by ``repl``.
202
+ :type repl: str
203
+ :param repl: An expression specifying what should replace the
204
+ matched substring. Typically, this will include a named
205
+ replacement group, specified by ``regexp``.
206
+ :rtype: None
207
+ :raise ValueError: If this transformation generated an
208
+ invalid chunkstring.
209
+ """
210
+ # Do the actual substitution
211
+ s = re.sub(regexp, repl, self._str)
212
+
213
+ # The substitution might have generated "empty chunks"
214
+ # (substrings of the form "{}"). Remove them, so they don't
215
+ # interfere with other transformations.
216
+ s = re.sub(r"\{\}", "", s)
217
+
218
+ # Make sure that the transformation was legal.
219
+ if self._debug > 1:
220
+ self._verify(s, self._debug - 2)
221
+
222
+ # Commit the transformation.
223
+ self._str = s
224
+
225
+ def __repr__(self):
226
+ """
227
+ Return a string representation of this ``ChunkString``.
228
+ It has the form::
229
+
230
+ <ChunkString: '{<DT><JJ><NN>}<VBN><IN>{<DT><NN>}'>
231
+
232
+ :rtype: str
233
+ """
234
+ return "<ChunkString: %s>" % repr(self._str)
235
+
236
+ def __str__(self):
237
+ """
238
+ Return a formatted representation of this ``ChunkString``.
239
+ This representation will include extra spaces to ensure that
240
+ tags will line up with the representation of other
241
+ ``ChunkStrings`` for the same text, regardless of the chunking.
242
+
243
+ :rtype: str
244
+ """
245
+ # Add spaces to make everything line up.
246
+ str = re.sub(r">(?!\})", r"> ", self._str)
247
+ str = re.sub(r"([^\{])<", r"\1 <", str)
248
+ if str[0] == "<":
249
+ str = " " + str
250
+ return str
251
+
252
+
253
+ # //////////////////////////////////////////////////////
254
+ # Chunking Rules
255
+ # //////////////////////////////////////////////////////
256
+
257
+
258
+ class RegexpChunkRule:
259
+ """
260
+ A rule specifying how to modify the chunking in a ``ChunkString``,
261
+ using a transformational regular expression. The
262
+ ``RegexpChunkRule`` class itself can be used to implement any
263
+ transformational rule based on regular expressions. There are
264
+ also a number of subclasses, which can be used to implement
265
+ simpler types of rules, based on matching regular expressions.
266
+
267
+ Each ``RegexpChunkRule`` has a regular expression and a
268
+ replacement expression. When a ``RegexpChunkRule`` is "applied"
269
+ to a ``ChunkString``, it searches the ``ChunkString`` for any
270
+ substring that matches the regular expression, and replaces it
271
+ using the replacement expression. This search/replace operation
272
+ has the same semantics as ``re.sub``.
273
+
274
+ Each ``RegexpChunkRule`` also has a description string, which
275
+ gives a short (typically less than 75 characters) description of
276
+ the purpose of the rule.
277
+
278
+ The transformation defined by this ``RegexpChunkRule`` should
279
+ only add and remove braces; it should *not* modify the sequence
280
+ of angle-bracket delimited tags. Furthermore, this transformation
281
+ may not result in nested or mismatched bracketing.
282
+ """
283
+
284
+ def __init__(self, regexp, repl, descr):
285
+ """
286
+ Construct a new RegexpChunkRule.
287
+
288
+ :type regexp: regexp or str
289
+ :param regexp: The regular expression for this ``RegexpChunkRule``.
290
+ When this rule is applied to a ``ChunkString``, any
291
+ substring that matches ``regexp`` will be replaced using
292
+ the replacement string ``repl``. Note that this must be a
293
+ normal regular expression, not a tag pattern.
294
+ :type repl: str
295
+ :param repl: The replacement expression for this ``RegexpChunkRule``.
296
+ When this rule is applied to a ``ChunkString``, any substring
297
+ that matches ``regexp`` will be replaced using ``repl``.
298
+ :type descr: str
299
+ :param descr: A short description of the purpose and/or effect
300
+ of this rule.
301
+ """
302
+ if isinstance(regexp, str):
303
+ regexp = re.compile(regexp)
304
+ self._repl = repl
305
+ self._descr = descr
306
+ self._regexp = regexp
307
+
308
+ def apply(self, chunkstr):
309
+ # Keep docstring generic so we can inherit it.
310
+ """
311
+ Apply this rule to the given ``ChunkString``. See the
312
+ class reference documentation for a description of what it
313
+ means to apply a rule.
314
+
315
+ :type chunkstr: ChunkString
316
+ :param chunkstr: The chunkstring to which this rule is applied.
317
+ :rtype: None
318
+ :raise ValueError: If this transformation generated an
319
+ invalid chunkstring.
320
+ """
321
+ chunkstr.xform(self._regexp, self._repl)
322
+
323
+ def descr(self):
324
+ """
325
+ Return a short description of the purpose and/or effect of
326
+ this rule.
327
+
328
+ :rtype: str
329
+ """
330
+ return self._descr
331
+
332
+ def __repr__(self):
333
+ """
334
+ Return a string representation of this rule. It has the form::
335
+
336
+ <RegexpChunkRule: '{<IN|VB.*>}'->'<IN>'>
337
+
338
+ Note that this representation does not include the
339
+ description string; that string can be accessed
340
+ separately with the ``descr()`` method.
341
+
342
+ :rtype: str
343
+ """
344
+ return (
345
+ "<RegexpChunkRule: "
346
+ + repr(self._regexp.pattern)
347
+ + "->"
348
+ + repr(self._repl)
349
+ + ">"
350
+ )
351
+
352
+ @staticmethod
353
+ def fromstring(s):
354
+ """
355
+ Create a RegexpChunkRule from a string description.
356
+ Currently, the following formats are supported::
357
+
358
+ {regexp} # chunk rule
359
+ }regexp{ # strip rule
360
+ regexp}{regexp # split rule
361
+ regexp{}regexp # merge rule
362
+
363
+ Where ``regexp`` is a regular expression for the rule. Any
364
+ text following the comment marker (``#``) will be used as
365
+ the rule's description:
366
+
367
+ >>> from nltk.chunk.regexp import RegexpChunkRule
368
+ >>> RegexpChunkRule.fromstring('{<DT>?<NN.*>+}')
369
+ <ChunkRule: '<DT>?<NN.*>+'>
370
+ """
371
+ # Split off the comment (but don't split on '\#')
372
+ m = re.match(r"(?P<rule>(\\.|[^#])*)(?P<comment>#.*)?", s)
373
+ rule = m.group("rule").strip()
374
+ comment = (m.group("comment") or "")[1:].strip()
375
+
376
+ # Pattern bodies: chunk, strip, split, merge
377
+ try:
378
+ if not rule:
379
+ raise ValueError("Empty chunk pattern")
380
+ if rule[0] == "{" and rule[-1] == "}":
381
+ return ChunkRule(rule[1:-1], comment)
382
+ elif rule[0] == "}" and rule[-1] == "{":
383
+ return StripRule(rule[1:-1], comment)
384
+ elif "}{" in rule:
385
+ left, right = rule.split("}{")
386
+ return SplitRule(left, right, comment)
387
+ elif "{}" in rule:
388
+ left, right = rule.split("{}")
389
+ return MergeRule(left, right, comment)
390
+ elif re.match("[^{}]*{[^{}]*}[^{}]*", rule):
391
+ left, chunk, right = re.split("[{}]", rule)
392
+ return ChunkRuleWithContext(left, chunk, right, comment)
393
+ else:
394
+ raise ValueError("Illegal chunk pattern: %s" % rule)
395
+ except (ValueError, re.error) as e:
396
+ raise ValueError("Illegal chunk pattern: %s" % rule) from e
397
+
398
+
399
+ class ChunkRule(RegexpChunkRule):
400
+ """
401
+ A rule specifying how to add chunks to a ``ChunkString``, using a
402
+ matching tag pattern. When applied to a ``ChunkString``, it will
403
+ find any substring that matches this tag pattern and that is not
404
+ already part of a chunk, and create a new chunk containing that
405
+ substring.
406
+ """
407
+
408
+ def __init__(self, tag_pattern, descr):
409
+ """
410
+ Construct a new ``ChunkRule``.
411
+
412
+ :type tag_pattern: str
413
+ :param tag_pattern: This rule's tag pattern. When
414
+ applied to a ``ChunkString``, this rule will
415
+ chunk any substring that matches this tag pattern and that
416
+ is not already part of a chunk.
417
+ :type descr: str
418
+ :param descr: A short description of the purpose and/or effect
419
+ of this rule.
420
+ """
421
+ self._pattern = tag_pattern
422
+ regexp = re.compile(
423
+ "(?P<chunk>%s)%s"
424
+ % (tag_pattern2re_pattern(tag_pattern), ChunkString.IN_STRIP_PATTERN)
425
+ )
426
+ RegexpChunkRule.__init__(self, regexp, r"{\g<chunk>}", descr)
427
+
428
+ def __repr__(self):
429
+ """
430
+ Return a string representation of this rule. It has the form::
431
+
432
+ <ChunkRule: '<IN|VB.*>'>
433
+
434
+ Note that this representation does not include the
435
+ description string; that string can be accessed
436
+ separately with the ``descr()`` method.
437
+
438
+ :rtype: str
439
+ """
440
+ return "<ChunkRule: " + repr(self._pattern) + ">"
441
+
442
+
443
+ class StripRule(RegexpChunkRule):
444
+ """
445
+ A rule specifying how to remove strips to a ``ChunkString``,
446
+ using a matching tag pattern. When applied to a
447
+ ``ChunkString``, it will find any substring that matches this
448
+ tag pattern and that is contained in a chunk, and remove it
449
+ from that chunk, thus creating two new chunks.
450
+ """
451
+
452
+ def __init__(self, tag_pattern, descr):
453
+ """
454
+ Construct a new ``StripRule``.
455
+
456
+ :type tag_pattern: str
457
+ :param tag_pattern: This rule's tag pattern. When
458
+ applied to a ``ChunkString``, this rule will
459
+ find any substring that matches this tag pattern and that
460
+ is contained in a chunk, and remove it from that chunk,
461
+ thus creating two new chunks.
462
+ :type descr: str
463
+ :param descr: A short description of the purpose and/or effect
464
+ of this rule.
465
+ """
466
+ self._pattern = tag_pattern
467
+ regexp = re.compile(
468
+ "(?P<strip>%s)%s"
469
+ % (tag_pattern2re_pattern(tag_pattern), ChunkString.IN_CHUNK_PATTERN)
470
+ )
471
+ RegexpChunkRule.__init__(self, regexp, r"}\g<strip>{", descr)
472
+
473
+ def __repr__(self):
474
+ """
475
+ Return a string representation of this rule. It has the form::
476
+
477
+ <StripRule: '<IN|VB.*>'>
478
+
479
+ Note that this representation does not include the
480
+ description string; that string can be accessed
481
+ separately with the ``descr()`` method.
482
+
483
+ :rtype: str
484
+ """
485
+ return "<StripRule: " + repr(self._pattern) + ">"
486
+
487
+
488
+ class UnChunkRule(RegexpChunkRule):
489
+ """
490
+ A rule specifying how to remove chunks to a ``ChunkString``,
491
+ using a matching tag pattern. When applied to a
492
+ ``ChunkString``, it will find any complete chunk that matches this
493
+ tag pattern, and un-chunk it.
494
+ """
495
+
496
+ def __init__(self, tag_pattern, descr):
497
+ """
498
+ Construct a new ``UnChunkRule``.
499
+
500
+ :type tag_pattern: str
501
+ :param tag_pattern: This rule's tag pattern. When
502
+ applied to a ``ChunkString``, this rule will
503
+ find any complete chunk that matches this tag pattern,
504
+ and un-chunk it.
505
+ :type descr: str
506
+ :param descr: A short description of the purpose and/or effect
507
+ of this rule.
508
+ """
509
+ self._pattern = tag_pattern
510
+ regexp = re.compile(r"\{(?P<chunk>%s)\}" % tag_pattern2re_pattern(tag_pattern))
511
+ RegexpChunkRule.__init__(self, regexp, r"\g<chunk>", descr)
512
+
513
+ def __repr__(self):
514
+ """
515
+ Return a string representation of this rule. It has the form::
516
+
517
+ <UnChunkRule: '<IN|VB.*>'>
518
+
519
+ Note that this representation does not include the
520
+ description string; that string can be accessed
521
+ separately with the ``descr()`` method.
522
+
523
+ :rtype: str
524
+ """
525
+ return "<UnChunkRule: " + repr(self._pattern) + ">"
526
+
527
+
528
+ class MergeRule(RegexpChunkRule):
529
+ """
530
+ A rule specifying how to merge chunks in a ``ChunkString``, using
531
+ two matching tag patterns: a left pattern, and a right pattern.
532
+ When applied to a ``ChunkString``, it will find any chunk whose end
533
+ matches left pattern, and immediately followed by a chunk whose
534
+ beginning matches right pattern. It will then merge those two
535
+ chunks into a single chunk.
536
+ """
537
+
538
+ def __init__(self, left_tag_pattern, right_tag_pattern, descr):
539
+ """
540
+ Construct a new ``MergeRule``.
541
+
542
+ :type right_tag_pattern: str
543
+ :param right_tag_pattern: This rule's right tag
544
+ pattern. When applied to a ``ChunkString``, this
545
+ rule will find any chunk whose end matches
546
+ ``left_tag_pattern``, and immediately followed by a chunk
547
+ whose beginning matches this pattern. It will
548
+ then merge those two chunks into a single chunk.
549
+ :type left_tag_pattern: str
550
+ :param left_tag_pattern: This rule's left tag
551
+ pattern. When applied to a ``ChunkString``, this
552
+ rule will find any chunk whose end matches
553
+ this pattern, and immediately followed by a chunk
554
+ whose beginning matches ``right_tag_pattern``. It will
555
+ then merge those two chunks into a single chunk.
556
+
557
+ :type descr: str
558
+ :param descr: A short description of the purpose and/or effect
559
+ of this rule.
560
+ """
561
+ # Ensure that the individual patterns are coherent. E.g., if
562
+ # left='(' and right=')', then this will raise an exception:
563
+ re.compile(tag_pattern2re_pattern(left_tag_pattern))
564
+ re.compile(tag_pattern2re_pattern(right_tag_pattern))
565
+
566
+ self._left_tag_pattern = left_tag_pattern
567
+ self._right_tag_pattern = right_tag_pattern
568
+ regexp = re.compile(
569
+ "(?P<left>%s)}{(?=%s)"
570
+ % (
571
+ tag_pattern2re_pattern(left_tag_pattern),
572
+ tag_pattern2re_pattern(right_tag_pattern),
573
+ )
574
+ )
575
+ RegexpChunkRule.__init__(self, regexp, r"\g<left>", descr)
576
+
577
+ def __repr__(self):
578
+ """
579
+ Return a string representation of this rule. It has the form::
580
+
581
+ <MergeRule: '<NN|DT|JJ>', '<NN|JJ>'>
582
+
583
+ Note that this representation does not include the
584
+ description string; that string can be accessed
585
+ separately with the ``descr()`` method.
586
+
587
+ :rtype: str
588
+ """
589
+ return (
590
+ "<MergeRule: "
591
+ + repr(self._left_tag_pattern)
592
+ + ", "
593
+ + repr(self._right_tag_pattern)
594
+ + ">"
595
+ )
596
+
597
+
598
+ class SplitRule(RegexpChunkRule):
599
+ """
600
+ A rule specifying how to split chunks in a ``ChunkString``, using
601
+ two matching tag patterns: a left pattern, and a right pattern.
602
+ When applied to a ``ChunkString``, it will find any chunk that
603
+ matches the left pattern followed by the right pattern. It will
604
+ then split the chunk into two new chunks, at the point between the
605
+ two pattern matches.
606
+ """
607
+
608
+ def __init__(self, left_tag_pattern, right_tag_pattern, descr):
609
+ """
610
+ Construct a new ``SplitRule``.
611
+
612
+ :type right_tag_pattern: str
613
+ :param right_tag_pattern: This rule's right tag
614
+ pattern. When applied to a ``ChunkString``, this rule will
615
+ find any chunk containing a substring that matches
616
+ ``left_tag_pattern`` followed by this pattern. It will
617
+ then split the chunk into two new chunks at the point
618
+ between these two matching patterns.
619
+ :type left_tag_pattern: str
620
+ :param left_tag_pattern: This rule's left tag
621
+ pattern. When applied to a ``ChunkString``, this rule will
622
+ find any chunk containing a substring that matches this
623
+ pattern followed by ``right_tag_pattern``. It will then
624
+ split the chunk into two new chunks at the point between
625
+ these two matching patterns.
626
+ :type descr: str
627
+ :param descr: A short description of the purpose and/or effect
628
+ of this rule.
629
+ """
630
+ # Ensure that the individual patterns are coherent. E.g., if
631
+ # left='(' and right=')', then this will raise an exception:
632
+ re.compile(tag_pattern2re_pattern(left_tag_pattern))
633
+ re.compile(tag_pattern2re_pattern(right_tag_pattern))
634
+
635
+ self._left_tag_pattern = left_tag_pattern
636
+ self._right_tag_pattern = right_tag_pattern
637
+ regexp = re.compile(
638
+ "(?P<left>%s)(?=%s)"
639
+ % (
640
+ tag_pattern2re_pattern(left_tag_pattern),
641
+ tag_pattern2re_pattern(right_tag_pattern),
642
+ )
643
+ )
644
+ RegexpChunkRule.__init__(self, regexp, r"\g<left>}{", descr)
645
+
646
+ def __repr__(self):
647
+ """
648
+ Return a string representation of this rule. It has the form::
649
+
650
+ <SplitRule: '<NN>', '<DT>'>
651
+
652
+ Note that this representation does not include the
653
+ description string; that string can be accessed
654
+ separately with the ``descr()`` method.
655
+
656
+ :rtype: str
657
+ """
658
+ return (
659
+ "<SplitRule: "
660
+ + repr(self._left_tag_pattern)
661
+ + ", "
662
+ + repr(self._right_tag_pattern)
663
+ + ">"
664
+ )
665
+
666
+
667
+ class ExpandLeftRule(RegexpChunkRule):
668
+ """
669
+ A rule specifying how to expand chunks in a ``ChunkString`` to the left,
670
+ using two matching tag patterns: a left pattern, and a right pattern.
671
+ When applied to a ``ChunkString``, it will find any chunk whose beginning
672
+ matches right pattern, and immediately preceded by a strip whose
673
+ end matches left pattern. It will then expand the chunk to incorporate
674
+ the new material on the left.
675
+ """
676
+
677
+ def __init__(self, left_tag_pattern, right_tag_pattern, descr):
678
+ """
679
+ Construct a new ``ExpandLeftRule``.
680
+
681
+ :type right_tag_pattern: str
682
+ :param right_tag_pattern: This rule's right tag
683
+ pattern. When applied to a ``ChunkString``, this
684
+ rule will find any chunk whose beginning matches
685
+ this pattern, and that is immediately preceded by a strip
686
+ whose end matches ``left_tag_pattern``. It will
687
+ then expand the chunk to incorporate the new material on the left.
688
+ :type left_tag_pattern: str
689
+ :param left_tag_pattern: This rule's left tag
690
+ pattern. When applied to a ``ChunkString``, this
691
+ rule will find any chunk whose beginning matches
692
+ ``right_tag_pattern``, and that is immediately preceded by a strip
693
+ whose end matches this pattern. It will
694
+ then expand the chunk to incorporate the new material on the left.
695
+
696
+ :type descr: str
697
+ :param descr: A short description of the purpose and/or effect
698
+ of this rule.
699
+ """
700
+ # Ensure that the individual patterns are coherent. E.g., if
701
+ # left='(' and right=')', then this will raise an exception:
702
+ re.compile(tag_pattern2re_pattern(left_tag_pattern))
703
+ re.compile(tag_pattern2re_pattern(right_tag_pattern))
704
+
705
+ self._left_tag_pattern = left_tag_pattern
706
+ self._right_tag_pattern = right_tag_pattern
707
+ regexp = re.compile(
708
+ r"(?P<left>%s)\{(?P<right>%s)"
709
+ % (
710
+ tag_pattern2re_pattern(left_tag_pattern),
711
+ tag_pattern2re_pattern(right_tag_pattern),
712
+ )
713
+ )
714
+ RegexpChunkRule.__init__(self, regexp, r"{\g<left>\g<right>", descr)
715
+
716
+ def __repr__(self):
717
+ """
718
+ Return a string representation of this rule. It has the form::
719
+
720
+ <ExpandLeftRule: '<NN|DT|JJ>', '<NN|JJ>'>
721
+
722
+ Note that this representation does not include the
723
+ description string; that string can be accessed
724
+ separately with the ``descr()`` method.
725
+
726
+ :rtype: str
727
+ """
728
+ return (
729
+ "<ExpandLeftRule: "
730
+ + repr(self._left_tag_pattern)
731
+ + ", "
732
+ + repr(self._right_tag_pattern)
733
+ + ">"
734
+ )
735
+
736
+
737
+ class ExpandRightRule(RegexpChunkRule):
738
+ """
739
+ A rule specifying how to expand chunks in a ``ChunkString`` to the
740
+ right, using two matching tag patterns: a left pattern, and a
741
+ right pattern. When applied to a ``ChunkString``, it will find any
742
+ chunk whose end matches the left pattern, and is immediately followed by
743
+ a strip whose beginning matches right pattern. It will then
744
+ expand the chunk to incorporate the new material on the right.
745
+ """
746
+
747
+ def __init__(self, left_tag_pattern, right_tag_pattern, descr):
748
+ """
749
+ Construct a new ``ExpandRightRule``.
750
+
751
+ :type right_tag_pattern: str
752
+ :param right_tag_pattern: This rule's right tag
753
+ pattern. When applied to a ``ChunkString``, this
754
+ rule will find any chunk whose end matches
755
+ ``left_tag_pattern``, and that is immediately followed by a strip
756
+ whose beginning matches this pattern. It will
757
+ then expand the chunk to incorporate the new material on the right.
758
+ :type left_tag_pattern: str
759
+ :param left_tag_pattern: This rule's left tag
760
+ pattern. When applied to a ``ChunkString``, this
761
+ rule will find any chunk whose end matches
762
+ this pattern, and immediately followed by a strip
763
+ whose beginning matches ``right_tag_pattern``. It will
764
+ then expand the chunk to incorporate the new material on the right.
765
+
766
+ :type descr: str
767
+ :param descr: A short description of the purpose and/or effect
768
+ of this rule.
769
+ """
770
+ # Ensure that the individual patterns are coherent. E.g., if
771
+ # left='(' and right=')', then this will raise an exception:
772
+ re.compile(tag_pattern2re_pattern(left_tag_pattern))
773
+ re.compile(tag_pattern2re_pattern(right_tag_pattern))
774
+
775
+ self._left_tag_pattern = left_tag_pattern
776
+ self._right_tag_pattern = right_tag_pattern
777
+ regexp = re.compile(
778
+ r"(?P<left>%s)\}(?P<right>%s)"
779
+ % (
780
+ tag_pattern2re_pattern(left_tag_pattern),
781
+ tag_pattern2re_pattern(right_tag_pattern),
782
+ )
783
+ )
784
+ RegexpChunkRule.__init__(self, regexp, r"\g<left>\g<right>}", descr)
785
+
786
+ def __repr__(self):
787
+ """
788
+ Return a string representation of this rule. It has the form::
789
+
790
+ <ExpandRightRule: '<NN|DT|JJ>', '<NN|JJ>'>
791
+
792
+ Note that this representation does not include the
793
+ description string; that string can be accessed
794
+ separately with the ``descr()`` method.
795
+
796
+ :rtype: str
797
+ """
798
+ return (
799
+ "<ExpandRightRule: "
800
+ + repr(self._left_tag_pattern)
801
+ + ", "
802
+ + repr(self._right_tag_pattern)
803
+ + ">"
804
+ )
805
+
806
+
807
+ class ChunkRuleWithContext(RegexpChunkRule):
808
+ """
809
+ A rule specifying how to add chunks to a ``ChunkString``, using
810
+ three matching tag patterns: one for the left context, one for the
811
+ chunk, and one for the right context. When applied to a
812
+ ``ChunkString``, it will find any substring that matches the chunk
813
+ tag pattern, is surrounded by substrings that match the two
814
+ context patterns, and is not already part of a chunk; and create a
815
+ new chunk containing the substring that matched the chunk tag
816
+ pattern.
817
+
818
+ Caveat: Both the left and right context are consumed when this
819
+ rule matches; therefore, if you need to find overlapping matches,
820
+ you will need to apply your rule more than once.
821
+ """
822
+
823
+ def __init__(
824
+ self,
825
+ left_context_tag_pattern,
826
+ chunk_tag_pattern,
827
+ right_context_tag_pattern,
828
+ descr,
829
+ ):
830
+ """
831
+ Construct a new ``ChunkRuleWithContext``.
832
+
833
+ :type left_context_tag_pattern: str
834
+ :param left_context_tag_pattern: A tag pattern that must match
835
+ the left context of ``chunk_tag_pattern`` for this rule to
836
+ apply.
837
+ :type chunk_tag_pattern: str
838
+ :param chunk_tag_pattern: A tag pattern that must match for this
839
+ rule to apply. If the rule does apply, then this pattern
840
+ also identifies the substring that will be made into a chunk.
841
+ :type right_context_tag_pattern: str
842
+ :param right_context_tag_pattern: A tag pattern that must match
843
+ the right context of ``chunk_tag_pattern`` for this rule to
844
+ apply.
845
+ :type descr: str
846
+ :param descr: A short description of the purpose and/or effect
847
+ of this rule.
848
+ """
849
+ # Ensure that the individual patterns are coherent. E.g., if
850
+ # left='(' and right=')', then this will raise an exception:
851
+ re.compile(tag_pattern2re_pattern(left_context_tag_pattern))
852
+ re.compile(tag_pattern2re_pattern(chunk_tag_pattern))
853
+ re.compile(tag_pattern2re_pattern(right_context_tag_pattern))
854
+
855
+ self._left_context_tag_pattern = left_context_tag_pattern
856
+ self._chunk_tag_pattern = chunk_tag_pattern
857
+ self._right_context_tag_pattern = right_context_tag_pattern
858
+ regexp = re.compile(
859
+ "(?P<left>%s)(?P<chunk>%s)(?P<right>%s)%s"
860
+ % (
861
+ tag_pattern2re_pattern(left_context_tag_pattern),
862
+ tag_pattern2re_pattern(chunk_tag_pattern),
863
+ tag_pattern2re_pattern(right_context_tag_pattern),
864
+ ChunkString.IN_STRIP_PATTERN,
865
+ )
866
+ )
867
+ replacement = r"\g<left>{\g<chunk>}\g<right>"
868
+ RegexpChunkRule.__init__(self, regexp, replacement, descr)
869
+
870
+ def __repr__(self):
871
+ """
872
+ Return a string representation of this rule. It has the form::
873
+
874
+ <ChunkRuleWithContext: '<IN>', '<NN>', '<DT>'>
875
+
876
+ Note that this representation does not include the
877
+ description string; that string can be accessed
878
+ separately with the ``descr()`` method.
879
+
880
+ :rtype: str
881
+ """
882
+ return "<ChunkRuleWithContext: {!r}, {!r}, {!r}>".format(
883
+ self._left_context_tag_pattern,
884
+ self._chunk_tag_pattern,
885
+ self._right_context_tag_pattern,
886
+ )
887
+
888
+
889
+ # //////////////////////////////////////////////////////
890
+ # Tag Pattern Format Conversion
891
+ # //////////////////////////////////////////////////////
892
+
893
+ # this should probably be made more strict than it is -- e.g., it
894
+ # currently accepts 'foo'.
895
+ CHUNK_TAG_PATTERN = re.compile(
896
+ r"^(({}|<{}>)*)$".format(r"([^\{\}<>]|\{\d+,?\}|\{\d*,\d+\})+", r"[^\{\}<>]+")
897
+ )
898
+
899
+
900
+ def tag_pattern2re_pattern(tag_pattern):
901
+ """
902
+ Convert a tag pattern to a regular expression pattern. A "tag
903
+ pattern" is a modified version of a regular expression, designed
904
+ for matching sequences of tags. The differences between regular
905
+ expression patterns and tag patterns are:
906
+
907
+ - In tag patterns, ``'<'`` and ``'>'`` act as parentheses; so
908
+ ``'<NN>+'`` matches one or more repetitions of ``'<NN>'``, not
909
+ ``'<NN'`` followed by one or more repetitions of ``'>'``.
910
+ - Whitespace in tag patterns is ignored. So
911
+ ``'<DT> | <NN>'`` is equivalent to ``'<DT>|<NN>'``
912
+ - In tag patterns, ``'.'`` is equivalent to ``'[^{}<>]'``; so
913
+ ``'<NN.*>'`` matches any single tag starting with ``'NN'``.
914
+
915
+ In particular, ``tag_pattern2re_pattern`` performs the following
916
+ transformations on the given pattern:
917
+
918
+ - Replace '.' with '[^<>{}]'
919
+ - Remove any whitespace
920
+ - Add extra parens around '<' and '>', to make '<' and '>' act
921
+ like parentheses. E.g., so that in '<NN>+', the '+' has scope
922
+ over the entire '<NN>'; and so that in '<NN|IN>', the '|' has
923
+ scope over 'NN' and 'IN', but not '<' or '>'.
924
+ - Check to make sure the resulting pattern is valid.
925
+
926
+ :type tag_pattern: str
927
+ :param tag_pattern: The tag pattern to convert to a regular
928
+ expression pattern.
929
+ :raise ValueError: If ``tag_pattern`` is not a valid tag pattern.
930
+ In particular, ``tag_pattern`` should not include braces; and it
931
+ should not contain nested or mismatched angle-brackets.
932
+ :rtype: str
933
+ :return: A regular expression pattern corresponding to
934
+ ``tag_pattern``.
935
+ """
936
+ # Clean up the regular expression
937
+ tag_pattern = re.sub(r"\s", "", tag_pattern)
938
+ tag_pattern = re.sub(r"<", "(<(", tag_pattern)
939
+ tag_pattern = re.sub(r">", ")>)", tag_pattern)
940
+
941
+ # Check the regular expression
942
+ if not CHUNK_TAG_PATTERN.match(tag_pattern):
943
+ raise ValueError("Bad tag pattern: %r" % tag_pattern)
944
+
945
+ # Replace "." with CHUNK_TAG_CHAR.
946
+ # We have to do this after, since it adds {}[]<>s, which would
947
+ # confuse CHUNK_TAG_PATTERN.
948
+ # PRE doesn't have lookback assertions, so reverse twice, and do
949
+ # the pattern backwards (with lookahead assertions). This can be
950
+ # made much cleaner once we can switch back to SRE.
951
+ def reverse_str(str):
952
+ lst = list(str)
953
+ lst.reverse()
954
+ return "".join(lst)
955
+
956
+ tc_rev = reverse_str(ChunkString.CHUNK_TAG_CHAR)
957
+ reversed = reverse_str(tag_pattern)
958
+ reversed = re.sub(r"\.(?!\\(\\\\)*($|[^\\]))", tc_rev, reversed)
959
+ tag_pattern = reverse_str(reversed)
960
+
961
+ return tag_pattern
962
+
963
+
964
+ # //////////////////////////////////////////////////////
965
+ # RegexpChunkParser
966
+ # //////////////////////////////////////////////////////
967
+
968
+
969
+ class RegexpChunkParser(ChunkParserI):
970
+ """
971
+ A regular expression based chunk parser. ``RegexpChunkParser`` uses a
972
+ sequence of "rules" to find chunks of a single type within a
973
+ text. The chunking of the text is encoded using a ``ChunkString``,
974
+ and each rule acts by modifying the chunking in the
975
+ ``ChunkString``. The rules are all implemented using regular
976
+ expression matching and substitution.
977
+
978
+ The ``RegexpChunkRule`` class and its subclasses (``ChunkRule``,
979
+ ``StripRule``, ``UnChunkRule``, ``MergeRule``, and ``SplitRule``)
980
+ define the rules that are used by ``RegexpChunkParser``. Each rule
981
+ defines an ``apply()`` method, which modifies the chunking encoded
982
+ by a given ``ChunkString``.
983
+
984
+ :type _rules: list(RegexpChunkRule)
985
+ :ivar _rules: The list of rules that should be applied to a text.
986
+ :type _trace: int
987
+ :ivar _trace: The default level of tracing.
988
+
989
+ """
990
+
991
+ def __init__(self, rules, chunk_label="NP", root_label="S", trace=0):
992
+ """
993
+ Construct a new ``RegexpChunkParser``.
994
+
995
+ :type rules: list(RegexpChunkRule)
996
+ :param rules: The sequence of rules that should be used to
997
+ generate the chunking for a tagged text.
998
+ :type chunk_label: str
999
+ :param chunk_label: The node value that should be used for
1000
+ chunk subtrees. This is typically a short string
1001
+ describing the type of information contained by the chunk,
1002
+ such as ``"NP"`` for base noun phrases.
1003
+ :type root_label: str
1004
+ :param root_label: The node value that should be used for the
1005
+ top node of the chunk structure.
1006
+ :type trace: int
1007
+ :param trace: The level of tracing that should be used when
1008
+ parsing a text. ``0`` will generate no tracing output;
1009
+ ``1`` will generate normal tracing output; and ``2`` or
1010
+ higher will generate verbose tracing output.
1011
+ """
1012
+ self._rules = rules
1013
+ self._trace = trace
1014
+ self._chunk_label = chunk_label
1015
+ self._root_label = root_label
1016
+
1017
+ def _trace_apply(self, chunkstr, verbose):
1018
+ """
1019
+ Apply each rule of this ``RegexpChunkParser`` to ``chunkstr``, in
1020
+ turn. Generate trace output between each rule. If ``verbose``
1021
+ is true, then generate verbose output.
1022
+
1023
+ :type chunkstr: ChunkString
1024
+ :param chunkstr: The chunk string to which each rule should be
1025
+ applied.
1026
+ :type verbose: bool
1027
+ :param verbose: Whether output should be verbose.
1028
+ :rtype: None
1029
+ """
1030
+ print("# Input:")
1031
+ print(chunkstr)
1032
+ for rule in self._rules:
1033
+ rule.apply(chunkstr)
1034
+ if verbose:
1035
+ print("#", rule.descr() + " (" + repr(rule) + "):")
1036
+ else:
1037
+ print("#", rule.descr() + ":")
1038
+ print(chunkstr)
1039
+
1040
+ def _notrace_apply(self, chunkstr):
1041
+ """
1042
+ Apply each rule of this ``RegexpChunkParser`` to ``chunkstr``, in
1043
+ turn.
1044
+
1045
+ :param chunkstr: The chunk string to which each rule should be
1046
+ applied.
1047
+ :type chunkstr: ChunkString
1048
+ :rtype: None
1049
+ """
1050
+
1051
+ for rule in self._rules:
1052
+ rule.apply(chunkstr)
1053
+
1054
+ def parse(self, chunk_struct, trace=None):
1055
+ """
1056
+ :type chunk_struct: Tree
1057
+ :param chunk_struct: the chunk structure to be (further) chunked
1058
+ :type trace: int
1059
+ :param trace: The level of tracing that should be used when
1060
+ parsing a text. ``0`` will generate no tracing output;
1061
+ ``1`` will generate normal tracing output; and ``2`` or
1062
+ higher will generate verbose tracing output. This value
1063
+ overrides the trace level value that was given to the
1064
+ constructor.
1065
+ :rtype: Tree
1066
+ :return: a chunk structure that encodes the chunks in a given
1067
+ tagged sentence. A chunk is a non-overlapping linguistic
1068
+ group, such as a noun phrase. The set of chunks
1069
+ identified in the chunk structure depends on the rules
1070
+ used to define this ``RegexpChunkParser``.
1071
+ """
1072
+ if len(chunk_struct) == 0:
1073
+ print("Warning: parsing empty text")
1074
+ return Tree(self._root_label, [])
1075
+
1076
+ try:
1077
+ chunk_struct.label()
1078
+ except AttributeError:
1079
+ chunk_struct = Tree(self._root_label, chunk_struct)
1080
+
1081
+ # Use the default trace value?
1082
+ if trace is None:
1083
+ trace = self._trace
1084
+
1085
+ chunkstr = ChunkString(chunk_struct)
1086
+
1087
+ # Apply the sequence of rules to the chunkstring.
1088
+ if trace:
1089
+ verbose = trace > 1
1090
+ self._trace_apply(chunkstr, verbose)
1091
+ else:
1092
+ self._notrace_apply(chunkstr)
1093
+
1094
+ # Use the chunkstring to create a chunk structure.
1095
+ return chunkstr.to_chunkstruct(self._chunk_label)
1096
+
1097
+ def rules(self):
1098
+ """
1099
+ :return: the sequence of rules used by ``RegexpChunkParser``.
1100
+ :rtype: list(RegexpChunkRule)
1101
+ """
1102
+ return self._rules
1103
+
1104
+ def __repr__(self):
1105
+ """
1106
+ :return: a concise string representation of this
1107
+ ``RegexpChunkParser``.
1108
+ :rtype: str
1109
+ """
1110
+ return "<RegexpChunkParser with %d rules>" % len(self._rules)
1111
+
1112
+ def __str__(self):
1113
+ """
1114
+ :return: a verbose string representation of this ``RegexpChunkParser``.
1115
+ :rtype: str
1116
+ """
1117
+ s = "RegexpChunkParser with %d rules:\n" % len(self._rules)
1118
+ margin = 0
1119
+ for rule in self._rules:
1120
+ margin = max(margin, len(rule.descr()))
1121
+ if margin < 35:
1122
+ format = " %" + repr(-(margin + 3)) + "s%s\n"
1123
+ else:
1124
+ format = " %s\n %s\n"
1125
+ for rule in self._rules:
1126
+ s += format % (rule.descr(), repr(rule))
1127
+ return s[:-1]
1128
+
1129
+
1130
+ # //////////////////////////////////////////////////////
1131
+ # Chunk Grammar
1132
+ # //////////////////////////////////////////////////////
1133
+
1134
+
1135
+ class RegexpParser(ChunkParserI):
1136
+ r"""
1137
+ A grammar based chunk parser. ``chunk.RegexpParser`` uses a set of
1138
+ regular expression patterns to specify the behavior of the parser.
1139
+ The chunking of the text is encoded using a ``ChunkString``, and
1140
+ each rule acts by modifying the chunking in the ``ChunkString``.
1141
+ The rules are all implemented using regular expression matching
1142
+ and substitution.
1143
+
1144
+ A grammar contains one or more clauses in the following form::
1145
+
1146
+ NP:
1147
+ {<DT|JJ>} # chunk determiners and adjectives
1148
+ }<[\.VI].*>+{ # strip any tag beginning with V, I, or .
1149
+ <.*>}{<DT> # split a chunk at a determiner
1150
+ <DT|JJ>{}<NN.*> # merge chunk ending with det/adj
1151
+ # with one starting with a noun
1152
+
1153
+ The patterns of a clause are executed in order. An earlier
1154
+ pattern may introduce a chunk boundary that prevents a later
1155
+ pattern from executing. Sometimes an individual pattern will
1156
+ match on multiple, overlapping extents of the input. As with
1157
+ regular expression substitution more generally, the chunker will
1158
+ identify the first match possible, then continue looking for matches
1159
+ after this one has ended.
1160
+
1161
+ The clauses of a grammar are also executed in order. A cascaded
1162
+ chunk parser is one having more than one clause. The maximum depth
1163
+ of a parse tree created by this chunk parser is the same as the
1164
+ number of clauses in the grammar.
1165
+
1166
+ When tracing is turned on, the comment portion of a line is displayed
1167
+ each time the corresponding pattern is applied.
1168
+
1169
+ :type _start: str
1170
+ :ivar _start: The start symbol of the grammar (the root node of
1171
+ resulting trees)
1172
+ :type _stages: list(RegexpChunkParser)
1173
+ :ivar _stages: The list of parsing stages corresponding to the grammar
1174
+
1175
+ """
1176
+
1177
+ def __init__(self, grammar, root_label="S", loop=1, trace=0):
1178
+ """
1179
+ Create a new chunk parser, from the given start state
1180
+ and set of chunk patterns.
1181
+
1182
+ :param grammar: The grammar, or a list of RegexpChunkParser objects
1183
+ :type grammar: str or list(RegexpChunkParser)
1184
+ :param root_label: The top node of the tree being created
1185
+ :type root_label: str or Nonterminal
1186
+ :param loop: The number of times to run through the patterns
1187
+ :type loop: int
1188
+ :type trace: int
1189
+ :param trace: The level of tracing that should be used when
1190
+ parsing a text. ``0`` will generate no tracing output;
1191
+ ``1`` will generate normal tracing output; and ``2`` or
1192
+ higher will generate verbose tracing output.
1193
+ """
1194
+ self._trace = trace
1195
+ self._stages = []
1196
+ self._grammar = grammar
1197
+ self._loop = loop
1198
+
1199
+ if isinstance(grammar, str):
1200
+ self._read_grammar(grammar, root_label, trace)
1201
+ else:
1202
+ # Make sure the grammar looks like it has the right type:
1203
+ type_err = (
1204
+ "Expected string or list of RegexpChunkParsers " "for the grammar."
1205
+ )
1206
+ try:
1207
+ grammar = list(grammar)
1208
+ except BaseException as e:
1209
+ raise TypeError(type_err) from e
1210
+ for elt in grammar:
1211
+ if not isinstance(elt, RegexpChunkParser):
1212
+ raise TypeError(type_err)
1213
+ self._stages = grammar
1214
+
1215
+ def _read_grammar(self, grammar, root_label, trace):
1216
+ """
1217
+ Helper function for __init__: read the grammar if it is a
1218
+ string.
1219
+ """
1220
+ rules = []
1221
+ lhs = None
1222
+ pattern = regex.compile("(?P<nonterminal>(\\.|[^:])*)(:(?P<rule>.*))")
1223
+ for line in grammar.split("\n"):
1224
+ line = line.strip()
1225
+
1226
+ # New stage begins if there's an unescaped ':'
1227
+ m = pattern.match(line)
1228
+ if m:
1229
+ # Record the stage that we just completed.
1230
+ self._add_stage(rules, lhs, root_label, trace)
1231
+ # Start a new stage.
1232
+ lhs = m.group("nonterminal").strip()
1233
+ rules = []
1234
+ line = m.group("rule").strip()
1235
+
1236
+ # Skip blank & comment-only lines
1237
+ if line == "" or line.startswith("#"):
1238
+ continue
1239
+
1240
+ # Add the rule
1241
+ rules.append(RegexpChunkRule.fromstring(line))
1242
+
1243
+ # Record the final stage
1244
+ self._add_stage(rules, lhs, root_label, trace)
1245
+
1246
+ def _add_stage(self, rules, lhs, root_label, trace):
1247
+ """
1248
+ Helper function for __init__: add a new stage to the parser.
1249
+ """
1250
+ if rules != []:
1251
+ if not lhs:
1252
+ raise ValueError("Expected stage marker (eg NP:)")
1253
+ parser = RegexpChunkParser(
1254
+ rules, chunk_label=lhs, root_label=root_label, trace=trace
1255
+ )
1256
+ self._stages.append(parser)
1257
+
1258
+ def parse(self, chunk_struct, trace=None):
1259
+ """
1260
+ Apply the chunk parser to this input.
1261
+
1262
+ :type chunk_struct: Tree
1263
+ :param chunk_struct: the chunk structure to be (further) chunked
1264
+ (this tree is modified, and is also returned)
1265
+ :type trace: int
1266
+ :param trace: The level of tracing that should be used when
1267
+ parsing a text. ``0`` will generate no tracing output;
1268
+ ``1`` will generate normal tracing output; and ``2`` or
1269
+ higher will generate verbose tracing output. This value
1270
+ overrides the trace level value that was given to the
1271
+ constructor.
1272
+ :return: the chunked output.
1273
+ :rtype: Tree
1274
+ """
1275
+ if trace is None:
1276
+ trace = self._trace
1277
+ for i in range(self._loop):
1278
+ for parser in self._stages:
1279
+ chunk_struct = parser.parse(chunk_struct, trace=trace)
1280
+ return chunk_struct
1281
+
1282
+ def __repr__(self):
1283
+ """
1284
+ :return: a concise string representation of this ``chunk.RegexpParser``.
1285
+ :rtype: str
1286
+ """
1287
+ return "<chunk.RegexpParser with %d stages>" % len(self._stages)
1288
+
1289
+ def __str__(self):
1290
+ """
1291
+ :return: a verbose string representation of this
1292
+ ``RegexpParser``.
1293
+ :rtype: str
1294
+ """
1295
+ s = "chunk.RegexpParser with %d stages:\n" % len(self._stages)
1296
+ margin = 0
1297
+ for parser in self._stages:
1298
+ s += "%s\n" % parser
1299
+ return s[:-1]
1300
+
1301
+
1302
+ # //////////////////////////////////////////////////////
1303
+ # Demonstration code
1304
+ # //////////////////////////////////////////////////////
1305
+
1306
+
1307
+ def demo_eval(chunkparser, text):
1308
+ """
1309
+ Demonstration code for evaluating a chunk parser, using a
1310
+ ``ChunkScore``. This function assumes that ``text`` contains one
1311
+ sentence per line, and that each sentence has the form expected by
1312
+ ``tree.chunk``. It runs the given chunk parser on each sentence in
1313
+ the text, and scores the result. It prints the final score
1314
+ (precision, recall, and f-measure); and reports the set of chunks
1315
+ that were missed and the set of chunks that were incorrect. (At
1316
+ most 10 missing chunks and 10 incorrect chunks are reported).
1317
+
1318
+ :param chunkparser: The chunkparser to be tested
1319
+ :type chunkparser: ChunkParserI
1320
+ :param text: The chunked tagged text that should be used for
1321
+ evaluation.
1322
+ :type text: str
1323
+ """
1324
+ from nltk import chunk
1325
+ from nltk.tree import Tree
1326
+
1327
+ # Evaluate our chunk parser.
1328
+ chunkscore = chunk.ChunkScore()
1329
+
1330
+ for sentence in text.split("\n"):
1331
+ print(sentence)
1332
+ sentence = sentence.strip()
1333
+ if not sentence:
1334
+ continue
1335
+ gold = chunk.tagstr2tree(sentence)
1336
+ tokens = gold.leaves()
1337
+ test = chunkparser.parse(Tree("S", tokens), trace=1)
1338
+ chunkscore.score(gold, test)
1339
+ print()
1340
+
1341
+ print("/" + ("=" * 75) + "\\")
1342
+ print("Scoring", chunkparser)
1343
+ print("-" * 77)
1344
+ print("Precision: %5.1f%%" % (chunkscore.precision() * 100), " " * 4, end=" ")
1345
+ print("Recall: %5.1f%%" % (chunkscore.recall() * 100), " " * 6, end=" ")
1346
+ print("F-Measure: %5.1f%%" % (chunkscore.f_measure() * 100))
1347
+
1348
+ # Missed chunks.
1349
+ if chunkscore.missed():
1350
+ print("Missed:")
1351
+ missed = chunkscore.missed()
1352
+ for chunk in missed[:10]:
1353
+ print(" ", " ".join(map(str, chunk)))
1354
+ if len(chunkscore.missed()) > 10:
1355
+ print(" ...")
1356
+
1357
+ # Incorrect chunks.
1358
+ if chunkscore.incorrect():
1359
+ print("Incorrect:")
1360
+ incorrect = chunkscore.incorrect()
1361
+ for chunk in incorrect[:10]:
1362
+ print(" ", " ".join(map(str, chunk)))
1363
+ if len(chunkscore.incorrect()) > 10:
1364
+ print(" ...")
1365
+
1366
+ print("\\" + ("=" * 75) + "/")
1367
+ print()
1368
+
1369
+
1370
+ def demo():
1371
+ """
1372
+ A demonstration for the ``RegexpChunkParser`` class. A single text is
1373
+ parsed with four different chunk parsers, using a variety of rules
1374
+ and strategies.
1375
+ """
1376
+
1377
+ from nltk import Tree, chunk
1378
+
1379
+ text = """\
1380
+ [ the/DT little/JJ cat/NN ] sat/VBD on/IN [ the/DT mat/NN ] ./.
1381
+ [ John/NNP ] saw/VBD [the/DT cats/NNS] [the/DT dog/NN] chased/VBD ./.
1382
+ [ John/NNP ] thinks/VBZ [ Mary/NN ] saw/VBD [ the/DT cat/NN ] sit/VB on/IN [ the/DT mat/NN ]./.
1383
+ """
1384
+
1385
+ print("*" * 75)
1386
+ print("Evaluation text:")
1387
+ print(text)
1388
+ print("*" * 75)
1389
+ print()
1390
+
1391
+ grammar = r"""
1392
+ NP: # NP stage
1393
+ {<DT>?<JJ>*<NN>} # chunk determiners, adjectives and nouns
1394
+ {<NNP>+} # chunk proper nouns
1395
+ """
1396
+ cp = chunk.RegexpParser(grammar)
1397
+ demo_eval(cp, text)
1398
+
1399
+ grammar = r"""
1400
+ NP:
1401
+ {<.*>} # start by chunking each tag
1402
+ }<[\.VI].*>+{ # unchunk any verbs, prepositions or periods
1403
+ <DT|JJ>{}<NN.*> # merge det/adj with nouns
1404
+ """
1405
+ cp = chunk.RegexpParser(grammar)
1406
+ demo_eval(cp, text)
1407
+
1408
+ grammar = r"""
1409
+ NP: {<DT>?<JJ>*<NN>} # chunk determiners, adjectives and nouns
1410
+ VP: {<TO>?<VB.*>} # VP = verb words
1411
+ """
1412
+ cp = chunk.RegexpParser(grammar)
1413
+ demo_eval(cp, text)
1414
+
1415
+ grammar = r"""
1416
+ NP: {<.*>*} # start by chunking everything
1417
+ }<[\.VI].*>+{ # strip any verbs, prepositions or periods
1418
+ <.*>}{<DT> # separate on determiners
1419
+ PP: {<IN><NP>} # PP = preposition + noun phrase
1420
+ VP: {<VB.*><NP|PP>*} # VP = verb words + NPs and PPs
1421
+ """
1422
+ cp = chunk.RegexpParser(grammar)
1423
+ demo_eval(cp, text)
1424
+
1425
+ # Evaluation
1426
+
1427
+ from nltk.corpus import conll2000
1428
+
1429
+ print()
1430
+ print("Demonstration of empty grammar:")
1431
+
1432
+ cp = chunk.RegexpParser("")
1433
+ print(chunk.accuracy(cp, conll2000.chunked_sents("test.txt", chunk_types=("NP",))))
1434
+
1435
+ print()
1436
+ print("Demonstration of accuracy evaluation using CoNLL tags:")
1437
+
1438
+ grammar = r"""
1439
+ NP:
1440
+ {<.*>} # start by chunking each tag
1441
+ }<[\.VI].*>+{ # unchunk any verbs, prepositions or periods
1442
+ <DT|JJ>{}<NN.*> # merge det/adj with nouns
1443
+ """
1444
+ cp = chunk.RegexpParser(grammar)
1445
+ print(chunk.accuracy(cp, conll2000.chunked_sents("test.txt")[:5]))
1446
+
1447
+ print()
1448
+ print("Demonstration of tagged token input")
1449
+
1450
+ grammar = r"""
1451
+ NP: {<.*>*} # start by chunking everything
1452
+ }<[\.VI].*>+{ # strip any verbs, prepositions or periods
1453
+ <.*>}{<DT> # separate on determiners
1454
+ PP: {<IN><NP>} # PP = preposition + noun phrase
1455
+ VP: {<VB.*><NP|PP>*} # VP = verb words + NPs and PPs
1456
+ """
1457
+ cp = chunk.RegexpParser(grammar)
1458
+ print(
1459
+ cp.parse(
1460
+ [
1461
+ ("the", "DT"),
1462
+ ("little", "JJ"),
1463
+ ("cat", "NN"),
1464
+ ("sat", "VBD"),
1465
+ ("on", "IN"),
1466
+ ("the", "DT"),
1467
+ ("mat", "NN"),
1468
+ (".", "."),
1469
+ ]
1470
+ )
1471
+ )
1472
+
1473
+
1474
+ if __name__ == "__main__":
1475
+ demo()
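A minimal usage sketch of the chunk grammar machinery added above (illustrative only, not part of the committed file; the grammar and the tagged sentence are invented for the example). It builds a RegexpParser from a one-clause grammar in the format described in the class docstring and applies it to a list of (word, tag) pairs, just as demo() does:

    # Illustrative sketch -- not part of nltk/chunk/regexp.py.
    from nltk.chunk import RegexpParser

    grammar = r"""
    NP: {<DT>?<JJ>*<NN.*>+}   # optional determiner, adjectives, then one or more nouns
    """
    parser = RegexpParser(grammar)

    tagged = [("the", "DT"), ("little", "JJ"), ("cat", "NN"),
              ("sat", "VBD"), ("on", "IN"), ("the", "DT"), ("mat", "NN")]
    print(parser.parse(tagged))
    # (S (NP the/DT little/JJ cat/NN) sat/VBD on/IN (NP the/DT mat/NN))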
llmeval-env/lib/python3.10/site-packages/nltk/chunk/util.py ADDED
@@ -0,0 +1,643 @@
1
+ # Natural Language Toolkit: Chunk format conversions
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]> (minor additions)
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import re
10
+
11
+ from nltk.metrics import accuracy as _accuracy
12
+ from nltk.tag.mapping import map_tag
13
+ from nltk.tag.util import str2tuple
14
+ from nltk.tree import Tree
15
+
16
+ ##//////////////////////////////////////////////////////
17
+ ## EVALUATION
18
+ ##//////////////////////////////////////////////////////
19
+
20
+
21
+ def accuracy(chunker, gold):
22
+ """
23
+ Score the accuracy of the chunker against the gold standard.
24
+ Strip the chunk information from the gold standard and rechunk it using
25
+ the chunker, then compute the accuracy score.
26
+
27
+ :type chunker: ChunkParserI
28
+ :param chunker: The chunker being evaluated.
29
+ :type gold: tree
30
+ :param gold: The chunk structures to score the chunker on.
31
+ :rtype: float
32
+ """
33
+
34
+ gold_tags = []
35
+ test_tags = []
36
+ for gold_tree in gold:
37
+ test_tree = chunker.parse(gold_tree.flatten())
38
+ gold_tags += tree2conlltags(gold_tree)
39
+ test_tags += tree2conlltags(test_tree)
40
+
41
+ # print 'GOLD:', gold_tags[:50]
42
+ # print 'TEST:', test_tags[:50]
43
+ return _accuracy(gold_tags, test_tags)
44
+
45
+
46
+ # Patched for increased performance by Yoav Goldberg <[email protected]>, 2006-01-13
47
+ # -- statistics are evaluated only on demand, instead of at every sentence evaluation
48
+ #
49
+ # SB: use nltk.metrics for precision/recall scoring?
50
+ #
51
+ class ChunkScore:
52
+ """
53
+ A utility class for scoring chunk parsers. ``ChunkScore`` can
54
+ evaluate a chunk parser's output, based on a number of statistics
55
+ (precision, recall, f-measure, missed chunks, incorrect chunks).
56
+ It can also combine the scores from the parsing of multiple texts;
57
+ this makes it significantly easier to evaluate a chunk parser that
58
+ operates one sentence at a time.
59
+
60
+ Texts are evaluated with the ``score`` method. The results of
61
+ evaluation can be accessed via a number of accessor methods, such
62
+ as ``precision`` and ``f_measure``. A typical use of the
63
+ ``ChunkScore`` class is::
64
+
65
+ >>> chunkscore = ChunkScore() # doctest: +SKIP
66
+ >>> for correct in correct_sentences: # doctest: +SKIP
67
+ ... guess = chunkparser.parse(correct.leaves()) # doctest: +SKIP
68
+ ... chunkscore.score(correct, guess) # doctest: +SKIP
69
+ >>> print('F Measure:', chunkscore.f_measure()) # doctest: +SKIP
70
+ F Measure: 0.823
71
+
72
+ :ivar kwargs: Keyword arguments:
73
+
74
+ - max_tp_examples: The maximum number of actual examples of true
75
+ positives to record. This affects the ``correct`` member
76
+ function: ``correct`` will not return more than this number
77
+ of true positive examples. This does *not* affect any of
78
+ the numerical metrics (precision, recall, or f-measure)
79
+
80
+ - max_fp_examples: The maximum number of actual examples of false
81
+ positives to record. This affects the ``incorrect`` member
82
+ function and the ``guessed`` member function: ``incorrect``
83
+ will not return more than this number of examples, and
84
+ ``guessed`` will not return more than this number of true
85
+ positive examples. This does *not* affect any of the
86
+ numerical metrics (precision, recall, or f-measure)
87
+
88
+ - max_fn_examples: The maximum number of actual examples of false
89
+ negatives to record. This affects the ``missed`` member
90
+ function and the ``correct`` member function: ``missed``
91
+ will not return more than this number of examples, and
92
+ ``correct`` will not return more than this number of true
93
+ negative examples. This does *not* affect any of the
94
+ numerical metrics (precision, recall, or f-measure)
95
+
96
+ - chunk_label: A regular expression indicating which chunks
97
+ should be compared. Defaults to ``'.*'`` (i.e., all chunks).
98
+
99
+ :type _tp: list(Token)
100
+ :ivar _tp: List of true positives
101
+ :type _fp: list(Token)
102
+ :ivar _fp: List of false positives
103
+ :type _fn: list(Token)
104
+ :ivar _fn: List of false negatives
105
+
106
+ :type _tp_num: int
107
+ :ivar _tp_num: Number of true positives
108
+ :type _fp_num: int
109
+ :ivar _fp_num: Number of false positives
110
+ :type _fn_num: int
111
+ :ivar _fn_num: Number of false negatives.
112
+ """
113
+
114
+ def __init__(self, **kwargs):
115
+ self._correct = set()
116
+ self._guessed = set()
117
+ self._tp = set()
118
+ self._fp = set()
119
+ self._fn = set()
120
+ self._max_tp = kwargs.get("max_tp_examples", 100)
121
+ self._max_fp = kwargs.get("max_fp_examples", 100)
122
+ self._max_fn = kwargs.get("max_fn_examples", 100)
123
+ self._chunk_label = kwargs.get("chunk_label", ".*")
124
+ self._tp_num = 0
125
+ self._fp_num = 0
126
+ self._fn_num = 0
127
+ self._count = 0
128
+ self._tags_correct = 0.0
129
+ self._tags_total = 0.0
130
+
131
+ self._measuresNeedUpdate = False
132
+
133
+ def _updateMeasures(self):
134
+ if self._measuresNeedUpdate:
135
+ self._tp = self._guessed & self._correct
136
+ self._fn = self._correct - self._guessed
137
+ self._fp = self._guessed - self._correct
138
+ self._tp_num = len(self._tp)
139
+ self._fp_num = len(self._fp)
140
+ self._fn_num = len(self._fn)
141
+ self._measuresNeedUpdate = False
142
+
143
+ def score(self, correct, guessed):
144
+ """
145
+ Given a correctly chunked sentence, score another chunked
146
+ version of the same sentence.
147
+
148
+ :type correct: chunk structure
149
+ :param correct: The known-correct ("gold standard") chunked
150
+ sentence.
151
+ :type guessed: chunk structure
152
+ :param guessed: The chunked sentence to be scored.
153
+ """
154
+ self._correct |= _chunksets(correct, self._count, self._chunk_label)
155
+ self._guessed |= _chunksets(guessed, self._count, self._chunk_label)
156
+ self._count += 1
157
+ self._measuresNeedUpdate = True
158
+ # Keep track of per-tag accuracy (if possible)
159
+ try:
160
+ correct_tags = tree2conlltags(correct)
161
+ guessed_tags = tree2conlltags(guessed)
162
+ except ValueError:
163
+ # This exception case is for nested chunk structures,
164
+ # where tree2conlltags will fail with a ValueError: "Tree
165
+ # is too deeply nested to be printed in CoNLL format."
166
+ correct_tags = guessed_tags = ()
167
+ self._tags_total += len(correct_tags)
168
+ self._tags_correct += sum(
169
+ 1 for (t, g) in zip(guessed_tags, correct_tags) if t == g
170
+ )
171
+
172
+ def accuracy(self):
173
+ """
174
+ Return the overall tag-based accuracy for all text that have
175
+ been scored by this ``ChunkScore``, using the IOB (conll2000)
176
+ tag encoding.
177
+
178
+ :rtype: float
179
+ """
180
+ if self._tags_total == 0:
181
+ return 1
182
+ return self._tags_correct / self._tags_total
183
+
184
+ def precision(self):
185
+ """
186
+ Return the overall precision for all texts that have been
187
+ scored by this ``ChunkScore``.
188
+
189
+ :rtype: float
190
+ """
191
+ self._updateMeasures()
192
+ div = self._tp_num + self._fp_num
193
+ if div == 0:
194
+ return 0
195
+ else:
196
+ return self._tp_num / div
197
+
198
+ def recall(self):
199
+ """
200
+ Return the overall recall for all texts that have been
201
+ scored by this ``ChunkScore``.
202
+
203
+ :rtype: float
204
+ """
205
+ self._updateMeasures()
206
+ div = self._tp_num + self._fn_num
207
+ if div == 0:
208
+ return 0
209
+ else:
210
+ return self._tp_num / div
211
+
212
+ def f_measure(self, alpha=0.5):
213
+ """
214
+ Return the overall F measure for all texts that have been
215
+ scored by this ``ChunkScore``.
216
+
217
+ :param alpha: the relative weighting of precision and recall.
218
+ Larger alpha biases the score towards the precision value,
219
+ while smaller alpha biases the score towards the recall
220
+ value. ``alpha`` should have a value in the range [0,1].
221
+ :type alpha: float
222
+ :rtype: float
223
+ """
224
+ self._updateMeasures()
225
+ p = self.precision()
226
+ r = self.recall()
227
+ if p == 0 or r == 0: # what if alpha is 0 or 1?
228
+ return 0
229
+ return 1 / (alpha / p + (1 - alpha) / r)
230
+
231
+ def missed(self):
232
+ """
233
+ Return the chunks which were included in the
234
+ correct chunk structures, but not in the guessed chunk
235
+ structures, listed in input order.
236
+
237
+ :rtype: list of chunks
238
+ """
239
+ self._updateMeasures()
240
+ chunks = list(self._fn)
241
+ return [c[1] for c in chunks] # discard position information
242
+
243
+ def incorrect(self):
244
+ """
245
+ Return the chunks which were included in the guessed chunk structures,
246
+ but not in the correct chunk structures, listed in input order.
247
+
248
+ :rtype: list of chunks
249
+ """
250
+ self._updateMeasures()
251
+ chunks = list(self._fp)
252
+ return [c[1] for c in chunks] # discard position information
253
+
254
+ def correct(self):
255
+ """
256
+ Return the chunks which were included in the correct
257
+ chunk structures, listed in input order.
258
+
259
+ :rtype: list of chunks
260
+ """
261
+ chunks = list(self._correct)
262
+ return [c[1] for c in chunks] # discard position information
263
+
264
+ def guessed(self):
265
+ """
266
+ Return the chunks which were included in the guessed
267
+ chunk structures, listed in input order.
268
+
269
+ :rtype: list of chunks
270
+ """
271
+ chunks = list(self._guessed)
272
+ return [c[1] for c in chunks] # discard position information
273
+
274
+ def __len__(self):
275
+ self._updateMeasures()
276
+ return self._tp_num + self._fn_num
277
+
278
+ def __repr__(self):
279
+ """
280
+ Return a concise representation of this ``ChunkScoring``.
281
+
282
+ :rtype: str
283
+ """
284
+ return "<ChunkScoring of " + repr(len(self)) + " chunks>"
285
+
286
+ def __str__(self):
287
+ """
288
+ Return a verbose representation of this ``ChunkScoring``.
289
+ This representation includes the precision, recall, and
290
+ f-measure scores. For other information about the score,
291
+ use the accessor methods (e.g., ``missed()`` and ``incorrect()``).
292
+
293
+ :rtype: str
294
+ """
295
+ return (
296
+ "ChunkParse score:\n"
297
+ + (f" IOB Accuracy: {self.accuracy() * 100:5.1f}%%\n")
298
+ + (f" Precision: {self.precision() * 100:5.1f}%%\n")
299
+ + (f" Recall: {self.recall() * 100:5.1f}%%\n")
300
+ + (f" F-Measure: {self.f_measure() * 100:5.1f}%%")
301
+ )
302
+
303
+
304
+ # extract chunks, and assign unique id, the absolute position of
305
+ # the first word of the chunk
306
+ def _chunksets(t, count, chunk_label):
307
+ pos = 0
308
+ chunks = []
309
+ for child in t:
310
+ if isinstance(child, Tree):
311
+ if re.match(chunk_label, child.label()):
312
+ chunks.append(((count, pos), child.freeze()))
313
+ pos += len(child.leaves())
314
+ else:
315
+ pos += 1
316
+ return set(chunks)
317
+
318
+
319
+ def tagstr2tree(
320
+ s, chunk_label="NP", root_label="S", sep="/", source_tagset=None, target_tagset=None
321
+ ):
322
+ """
323
+ Divide a string of bracketed tagged text into
324
+ chunks and unchunked tokens, and produce a Tree.
325
+ Chunks are marked by square brackets (``[...]``). Words are
326
+ delimited by whitespace, and each word should have the form
327
+ ``text/tag``. Words that do not contain a slash are
328
+ assigned a ``tag`` of None.
329
+
330
+ :param s: The string to be converted
331
+ :type s: str
332
+ :param chunk_label: The label to use for chunk nodes
333
+ :type chunk_label: str
334
+ :param root_label: The label to use for the root of the tree
335
+ :type root_label: str
336
+ :rtype: Tree
337
+ """
338
+
339
+ WORD_OR_BRACKET = re.compile(r"\[|\]|[^\[\]\s]+")
340
+
341
+ stack = [Tree(root_label, [])]
342
+ for match in WORD_OR_BRACKET.finditer(s):
343
+ text = match.group()
344
+ if text[0] == "[":
345
+ if len(stack) != 1:
346
+ raise ValueError(f"Unexpected [ at char {match.start():d}")
347
+ chunk = Tree(chunk_label, [])
348
+ stack[-1].append(chunk)
349
+ stack.append(chunk)
350
+ elif text[0] == "]":
351
+ if len(stack) != 2:
352
+ raise ValueError(f"Unexpected ] at char {match.start():d}")
353
+ stack.pop()
354
+ else:
355
+ if sep is None:
356
+ stack[-1].append(text)
357
+ else:
358
+ word, tag = str2tuple(text, sep)
359
+ if source_tagset and target_tagset:
360
+ tag = map_tag(source_tagset, target_tagset, tag)
361
+ stack[-1].append((word, tag))
362
+
363
+ if len(stack) != 1:
364
+ raise ValueError(f"Expected ] at char {len(s):d}")
365
+ return stack[0]
366
+
367
+
368
+ ### CONLL
369
+
370
+ _LINE_RE = re.compile(r"(\S+)\s+(\S+)\s+([IOB])-?(\S+)?")
371
+
372
+
373
+ def conllstr2tree(s, chunk_types=("NP", "PP", "VP"), root_label="S"):
374
+ """
375
+ Return a chunk structure for a single sentence
376
+ encoded in the given CONLL 2000 style string.
377
+ This function converts a CoNLL IOB string into a tree.
378
+ It uses the specified chunk types
379
+ (defaults to NP, PP and VP), and creates a tree rooted at a node
380
+ labeled S (by default).
381
+
382
+ :param s: The CoNLL string to be converted.
383
+ :type s: str
384
+ :param chunk_types: The chunk types to be converted.
385
+ :type chunk_types: tuple
386
+ :param root_label: The node label to use for the root.
387
+ :type root_label: str
388
+ :rtype: Tree
389
+ """
390
+
391
+ stack = [Tree(root_label, [])]
392
+
393
+ for lineno, line in enumerate(s.split("\n")):
394
+ if not line.strip():
395
+ continue
396
+
397
+ # Decode the line.
398
+ match = _LINE_RE.match(line)
399
+ if match is None:
400
+ raise ValueError(f"Error on line {lineno:d}")
401
+ (word, tag, state, chunk_type) = match.groups()
402
+
403
+ # If it's a chunk type we don't care about, treat it as O.
404
+ if chunk_types is not None and chunk_type not in chunk_types:
405
+ state = "O"
406
+
407
+ # For "Begin"/"Outside", finish any completed chunks -
408
+ # also do so for "Inside" which don't match the previous token.
409
+ mismatch_I = state == "I" and chunk_type != stack[-1].label()
410
+ if state in "BO" or mismatch_I:
411
+ if len(stack) == 2:
412
+ stack.pop()
413
+
414
+ # For "Begin", start a new chunk.
415
+ if state == "B" or mismatch_I:
416
+ chunk = Tree(chunk_type, [])
417
+ stack[-1].append(chunk)
418
+ stack.append(chunk)
419
+
420
+ # Add the new word token.
421
+ stack[-1].append((word, tag))
422
+
423
+ return stack[0]
424
+
425
+
426
+ def tree2conlltags(t):
427
+ """
428
+ Return a list of 3-tuples containing ``(word, tag, IOB-tag)``.
429
+ Convert a tree to the CoNLL IOB tag format.
430
+
431
+ :param t: The tree to be converted.
432
+ :type t: Tree
433
+ :rtype: list(tuple)
434
+ """
435
+
436
+ tags = []
437
+ for child in t:
438
+ try:
439
+ category = child.label()
440
+ prefix = "B-"
441
+ for contents in child:
442
+ if isinstance(contents, Tree):
443
+ raise ValueError(
444
+ "Tree is too deeply nested to be printed in CoNLL format"
445
+ )
446
+ tags.append((contents[0], contents[1], prefix + category))
447
+ prefix = "I-"
448
+ except AttributeError:
449
+ tags.append((child[0], child[1], "O"))
450
+ return tags
451
+
452
+
453
+ def conlltags2tree(
454
+ sentence, chunk_types=("NP", "PP", "VP"), root_label="S", strict=False
455
+ ):
456
+ """
457
+ Convert the CoNLL IOB format to a tree.
458
+ """
459
+ tree = Tree(root_label, [])
460
+ for (word, postag, chunktag) in sentence:
461
+ if chunktag is None:
462
+ if strict:
463
+ raise ValueError("Bad conll tag sequence")
464
+ else:
465
+ # Treat as O
466
+ tree.append((word, postag))
467
+ elif chunktag.startswith("B-"):
468
+ tree.append(Tree(chunktag[2:], [(word, postag)]))
469
+ elif chunktag.startswith("I-"):
470
+ if (
471
+ len(tree) == 0
472
+ or not isinstance(tree[-1], Tree)
473
+ or tree[-1].label() != chunktag[2:]
474
+ ):
475
+ if strict:
476
+ raise ValueError("Bad conll tag sequence")
477
+ else:
478
+ # Treat as B-*
479
+ tree.append(Tree(chunktag[2:], [(word, postag)]))
480
+ else:
481
+ tree[-1].append((word, postag))
482
+ elif chunktag == "O":
483
+ tree.append((word, postag))
484
+ else:
485
+ raise ValueError(f"Bad conll tag {chunktag!r}")
486
+ return tree
487
+
488
+
489
+ def tree2conllstr(t):
490
+ """
491
+ Return a multiline string where each line contains a word, tag and IOB tag.
492
+ Convert a tree to the CoNLL IOB string format
493
+
494
+ :param t: The tree to be converted.
495
+ :type t: Tree
496
+ :rtype: str
497
+ """
498
+ lines = [" ".join(token) for token in tree2conlltags(t)]
499
+ return "\n".join(lines)
500
+
501
+
502
+ ### IEER
503
+
504
+ _IEER_DOC_RE = re.compile(
505
+ r"<DOC>\s*"
506
+ r"(<DOCNO>\s*(?P<docno>.+?)\s*</DOCNO>\s*)?"
507
+ r"(<DOCTYPE>\s*(?P<doctype>.+?)\s*</DOCTYPE>\s*)?"
508
+ r"(<DATE_TIME>\s*(?P<date_time>.+?)\s*</DATE_TIME>\s*)?"
509
+ r"<BODY>\s*"
510
+ r"(<HEADLINE>\s*(?P<headline>.+?)\s*</HEADLINE>\s*)?"
511
+ r"<TEXT>(?P<text>.*?)</TEXT>\s*"
512
+ r"</BODY>\s*</DOC>\s*",
513
+ re.DOTALL,
514
+ )
515
+
516
+ _IEER_TYPE_RE = re.compile(r'<b_\w+\s+[^>]*?type="(?P<type>\w+)"')
517
+
518
+
519
+ def _ieer_read_text(s, root_label):
520
+ stack = [Tree(root_label, [])]
521
+ # s will be None if there is no headline in the text
522
+ # return the empty list in place of a Tree
523
+ if s is None:
524
+ return []
525
+ for piece_m in re.finditer(r"<[^>]+>|[^\s<]+", s):
526
+ piece = piece_m.group()
527
+ try:
528
+ if piece.startswith("<b_"):
529
+ m = _IEER_TYPE_RE.match(piece)
530
+ if m is None:
531
+ print("XXXX", piece)
532
+ chunk = Tree(m.group("type"), [])
533
+ stack[-1].append(chunk)
534
+ stack.append(chunk)
535
+ elif piece.startswith("<e_"):
536
+ stack.pop()
537
+ # elif piece.startswith('<'):
538
+ # print "ERROR:", piece
539
+ # raise ValueError # Unexpected HTML
540
+ else:
541
+ stack[-1].append(piece)
542
+ except (IndexError, ValueError) as e:
543
+ raise ValueError(
544
+ f"Bad IEER string (error at character {piece_m.start():d})"
545
+ ) from e
546
+ if len(stack) != 1:
547
+ raise ValueError("Bad IEER string")
548
+ return stack[0]
549
+
550
+
551
+ def ieerstr2tree(
552
+ s,
553
+ chunk_types=[
554
+ "LOCATION",
555
+ "ORGANIZATION",
556
+ "PERSON",
557
+ "DURATION",
558
+ "DATE",
559
+ "CARDINAL",
560
+ "PERCENT",
561
+ "MONEY",
562
+ "MEASURE",
563
+ ],
564
+ root_label="S",
565
+ ):
566
+ """
567
+ Return a chunk structure containing the chunked tagged text that is
568
+ encoded in the given IEER style string.
569
+ Convert a string of chunked tagged text in the IEER named
570
+ entity format into a chunk structure. Chunks are of several
571
+ types, LOCATION, ORGANIZATION, PERSON, DURATION, DATE, CARDINAL,
572
+ PERCENT, MONEY, and MEASURE.
573
+
574
+ :rtype: Tree
575
+ """
576
+
577
+ # Try looking for a single document. If that doesn't work, then just
578
+ # treat everything as if it was within the <TEXT>...</TEXT>.
579
+ m = _IEER_DOC_RE.match(s)
580
+ if m:
581
+ return {
582
+ "text": _ieer_read_text(m.group("text"), root_label),
583
+ "docno": m.group("docno"),
584
+ "doctype": m.group("doctype"),
585
+ "date_time": m.group("date_time"),
586
+ #'headline': m.group('headline')
587
+ # we want to capture NEs in the headline too!
588
+ "headline": _ieer_read_text(m.group("headline"), root_label),
589
+ }
590
+ else:
591
+ return _ieer_read_text(s, root_label)
592
+
593
+
594
+ def demo():
595
+
596
+ s = "[ Pierre/NNP Vinken/NNP ] ,/, [ 61/CD years/NNS ] old/JJ ,/, will/MD join/VB [ the/DT board/NN ] ./."
597
+ import nltk
598
+
599
+ t = nltk.chunk.tagstr2tree(s, chunk_label="NP")
600
+ t.pprint()
601
+ print()
602
+
603
+ s = """
604
+ These DT B-NP
605
+ research NN I-NP
606
+ protocols NNS I-NP
607
+ offer VBP B-VP
608
+ to TO B-PP
609
+ the DT B-NP
610
+ patient NN I-NP
611
+ not RB O
612
+ only RB O
613
+ the DT B-NP
614
+ very RB I-NP
615
+ best JJS I-NP
616
+ therapy NN I-NP
617
+ which WDT B-NP
618
+ we PRP B-NP
619
+ have VBP B-VP
620
+ established VBN I-VP
621
+ today NN B-NP
622
+ but CC B-NP
623
+ also RB I-NP
624
+ the DT B-NP
625
+ hope NN I-NP
626
+ of IN B-PP
627
+ something NN B-NP
628
+ still RB B-ADJP
629
+ better JJR I-ADJP
630
+ . . O
631
+ """
632
+
633
+ conll_tree = conllstr2tree(s, chunk_types=("NP", "PP"))
634
+ conll_tree.pprint()
635
+
636
+ # Demonstrate CoNLL output
637
+ print("CoNLL output:")
638
+ print(nltk.chunk.tree2conllstr(conll_tree))
639
+ print()
640
+
641
+
642
+ if __name__ == "__main__":
643
+ demo()
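A small round-trip sketch for the CoNLL conversion helpers added above (illustrative only, not part of the committed file; the IOB triples are invented). conlltags2tree builds a flat chunk tree from (word, tag, IOB-tag) triples, and tree2conlltags inverts it:

    # Illustrative sketch -- not part of nltk/chunk/util.py.
    from nltk.chunk.util import conlltags2tree, tree2conlltags

    triples = [("the", "DT", "B-NP"), ("cat", "NN", "I-NP"),
               ("sat", "VBD", "O"), ("down", "RP", "O")]
    tree = conlltags2tree(triples)
    print(tree)                              # (S (NP the/DT cat/NN) sat/VBD down/RP)
    assert tree2conlltags(tree) == triples   # lossless for flat chunk structures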
llmeval-env/lib/python3.10/site-packages/nltk/cli.py ADDED
@@ -0,0 +1,55 @@
1
+ # Natural Language Toolkit: NLTK Command-Line Interface
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # URL: <https://www.nltk.org/>
5
+ # For license information, see LICENSE.TXT
6
+
7
+
8
+ import click
9
+ from tqdm import tqdm
10
+
11
+ from nltk import word_tokenize
12
+ from nltk.util import parallelize_preprocess
13
+
14
+ CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
15
+
16
+
17
+ @click.group(context_settings=CONTEXT_SETTINGS)
18
+ @click.version_option()
19
+ def cli():
20
+ pass
21
+
22
+
23
+ @cli.command("tokenize")
24
+ @click.option(
25
+ "--language",
26
+ "-l",
27
+ default="en",
28
+ help="The language for the Punkt sentence tokenization.",
29
+ )
30
+ @click.option(
31
+ "--preserve-line",
32
+ "-l",
33
+ default=True,
34
+ is_flag=True,
35
+ help="An option to keep the preserve the sentence and not sentence tokenize it.",
36
+ )
37
+ @click.option("--processes", "-j", default=1, help="No. of processes.")
38
+ @click.option("--encoding", "-e", default="utf8", help="Specify encoding of file.")
39
+ @click.option(
40
+ "--delimiter", "-d", default=" ", help="Specify delimiter to join the tokens."
41
+ )
42
+ def tokenize_file(language, preserve_line, processes, encoding, delimiter):
43
+ """This command tokenizes text stream using nltk.word_tokenize"""
44
+ with click.get_text_stream("stdin", encoding=encoding) as fin:
45
+ with click.get_text_stream("stdout", encoding=encoding) as fout:
46
+ # If it's single process, joblib parallelization is slower,
47
+ # so just process line by line normally.
48
+ if processes == 1:
49
+ for line in tqdm(fin.readlines()):
50
+ print(delimiter.join(word_tokenize(line)), end="\n", file=fout)
51
+ else:
52
+ for outline in parallelize_preprocess(
53
+ word_tokenize, fin.readlines(), processes, progress_bar=True
54
+ ):
55
+ print(delimiter.join(outline), end="\n", file=fout)
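The tokenize command above reads lines from stdin and writes the joined tokens to stdout. A hedged sketch of exercising it through click's test runner, without assuming any particular console-script name (the entry point is not shown in this diff); it also assumes click and tqdm are installed and that the Punkt data needed by word_tokenize has been downloaded:

    # Illustrative sketch -- not part of nltk/cli.py.
    from click.testing import CliRunner
    from nltk.cli import cli

    runner = CliRunner()
    result = runner.invoke(
        cli,
        ["tokenize", "--delimiter", " "],
        input="Good muffins cost $3.88 in New York.\n",
    )
    print(result.output)   # each input line echoed back as space-joined tokens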
llmeval-env/lib/python3.10/site-packages/nltk/collections.py ADDED
@@ -0,0 +1,661 @@
1
+ # Natural Language Toolkit: Collections
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import bisect
9
+
10
+ # this unused import is for python 2.7
11
+ from collections import Counter, defaultdict, deque
12
+ from functools import total_ordering
13
+ from itertools import chain, islice
14
+
15
+ from nltk.internals import raise_unorderable_types, slice_bounds
16
+
17
+ ##########################################################################
18
+ # Ordered Dictionary
19
+ ##########################################################################
20
+
21
+
22
+ class OrderedDict(dict):
23
+ def __init__(self, data=None, **kwargs):
24
+ self._keys = self.keys(data, kwargs.get("keys"))
25
+ self._default_factory = kwargs.get("default_factory")
26
+ if data is None:
27
+ dict.__init__(self)
28
+ else:
29
+ dict.__init__(self, data)
30
+
31
+ def __delitem__(self, key):
32
+ dict.__delitem__(self, key)
33
+ self._keys.remove(key)
34
+
35
+ def __getitem__(self, key):
36
+ try:
37
+ return dict.__getitem__(self, key)
38
+ except KeyError:
39
+ return self.__missing__(key)
40
+
41
+ def __iter__(self):
42
+ return (key for key in self.keys())
43
+
44
+ def __missing__(self, key):
45
+ if not self._default_factory and key not in self._keys:
46
+ raise KeyError()
47
+ return self._default_factory()
48
+
49
+ def __setitem__(self, key, item):
50
+ dict.__setitem__(self, key, item)
51
+ if key not in self._keys:
52
+ self._keys.append(key)
53
+
54
+ def clear(self):
55
+ dict.clear(self)
56
+ self._keys.clear()
57
+
58
+ def copy(self):
59
+ d = dict.copy(self)
60
+ d._keys = self._keys
61
+ return d
62
+
63
+ def items(self):
64
+ # returns iterator under python 3 and list under python 2
65
+ return zip(self.keys(), self.values())
66
+
67
+ def keys(self, data=None, keys=None):
68
+ if data:
69
+ if keys:
70
+ assert isinstance(keys, list)
71
+ assert len(data) == len(keys)
72
+ return keys
73
+ else:
74
+ assert (
75
+ isinstance(data, dict)
76
+ or isinstance(data, OrderedDict)
77
+ or isinstance(data, list)
78
+ )
79
+ if isinstance(data, dict) or isinstance(data, OrderedDict):
80
+ return data.keys()
81
+ elif isinstance(data, list):
82
+ return [key for (key, value) in data]
83
+ elif "_keys" in self.__dict__:
84
+ return self._keys
85
+ else:
86
+ return []
87
+
88
+ def popitem(self):
89
+ if not self._keys:
90
+ raise KeyError()
91
+
92
+ key = self._keys.pop()
93
+ value = self[key]
94
+ del self[key]
95
+ return (key, value)
96
+
97
+ def setdefault(self, key, failobj=None):
98
+ dict.setdefault(self, key, failobj)
99
+ if key not in self._keys:
100
+ self._keys.append(key)
101
+
102
+ def update(self, data):
103
+ dict.update(self, data)
104
+ for key in self.keys(data):
105
+ if key not in self._keys:
106
+ self._keys.append(key)
107
+
108
+ def values(self):
109
+ # returns iterator under python 3
110
+ return map(self.get, self._keys)
111
+
112
+
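The OrderedDict above predates Python's built-in insertion-ordered dicts and keeps its own key list alongside the underlying dict. A quick behavioral sketch (illustrative only, not part of the uploaded file):

from nltk.collections import OrderedDict

od = OrderedDict()
od["b"] = 1
od["a"] = 2
od["c"] = 3
print(list(od.keys()))   # ['b', 'a', 'c'] -- insertion order is preserved
del od["a"]              # also removes 'a' from the internal key list
print(list(od.items()))  # [('b', 1), ('c', 3)]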
113
+ ######################################################################
114
+ # Lazy Sequences
115
+ ######################################################################
116
+
117
+
118
+ @total_ordering
119
+ class AbstractLazySequence:
120
+ """
121
+ An abstract base class for read-only sequences whose values are
122
+ computed as needed. Lazy sequences act like tuples -- they can be
123
+ indexed, sliced, and iterated over; but they may not be modified.
124
+
125
+ The most common application of lazy sequences in NLTK is for
126
+ corpus view objects, which provide access to the contents of a
127
+ corpus without loading the entire corpus into memory, by loading
128
+ pieces of the corpus from disk as needed.
129
+
130
+ The result of modifying a mutable element of a lazy sequence is
131
+ undefined. In particular, the modifications made to the element
132
+ may or may not persist, depending on whether and when the lazy
133
+ sequence caches that element's value or reconstructs it from
134
+ scratch.
135
+
136
+ Subclasses are required to define two methods: ``__len__()``
137
+ and ``iterate_from()``.
138
+ """
139
+
140
+ def __len__(self):
141
+ """
142
+ Return the number of tokens in the corpus file underlying this
143
+ corpus view.
144
+ """
145
+ raise NotImplementedError("should be implemented by subclass")
146
+
147
+ def iterate_from(self, start):
148
+ """
149
+ Return an iterator that generates the tokens in the corpus
150
+ file underlying this corpus view, starting at the token number
151
+ ``start``. If ``start>=len(self)``, then this iterator will
152
+ generate no tokens.
153
+ """
154
+ raise NotImplementedError("should be implemented by subclass")
155
+
156
+ def __getitem__(self, i):
157
+ """
158
+ Return the *i* th token in the corpus file underlying this
159
+ corpus view. Negative indices and spans are both supported.
160
+ """
161
+ if isinstance(i, slice):
162
+ start, stop = slice_bounds(self, i)
163
+ return LazySubsequence(self, start, stop)
164
+ else:
165
+ # Handle negative indices
166
+ if i < 0:
167
+ i += len(self)
168
+ if i < 0:
169
+ raise IndexError("index out of range")
170
+ # Use iterate_from to extract it.
171
+ try:
172
+ return next(self.iterate_from(i))
173
+ except StopIteration as e:
174
+ raise IndexError("index out of range") from e
175
+
176
+ def __iter__(self):
177
+ """Return an iterator that generates the tokens in the corpus
178
+ file underlying this corpus view."""
179
+ return self.iterate_from(0)
180
+
181
+ def count(self, value):
182
+ """Return the number of times this list contains ``value``."""
183
+ return sum(1 for elt in self if elt == value)
184
+
185
+ def index(self, value, start=None, stop=None):
186
+ """Return the index of the first occurrence of ``value`` in this
187
+ list that is greater than or equal to ``start`` and less than
188
+ ``stop``. Negative start and stop values are treated like negative
189
+ slice bounds -- i.e., they count from the end of the list."""
190
+ start, stop = slice_bounds(self, slice(start, stop))
191
+ for i, elt in enumerate(islice(self, start, stop)):
192
+ if elt == value:
193
+ return i + start
194
+ raise ValueError("index(x): x not in list")
195
+
196
+ def __contains__(self, value):
197
+ """Return true if this list contains ``value``."""
198
+ return bool(self.count(value))
199
+
200
+ def __add__(self, other):
201
+ """Return a list concatenating self with other."""
202
+ return LazyConcatenation([self, other])
203
+
204
+ def __radd__(self, other):
205
+ """Return a list concatenating other with self."""
206
+ return LazyConcatenation([other, self])
207
+
208
+ def __mul__(self, count):
209
+ """Return a list concatenating self with itself ``count`` times."""
210
+ return LazyConcatenation([self] * count)
211
+
212
+ def __rmul__(self, count):
213
+ """Return a list concatenating self with itself ``count`` times."""
214
+ return LazyConcatenation([self] * count)
215
+
216
+ _MAX_REPR_SIZE = 60
217
+
218
+ def __repr__(self):
219
+ """
220
+ Return a string representation for this corpus view that is
221
+ similar to a list's representation; but if it would be more
222
+ than 60 characters long, it is truncated.
223
+ """
224
+ pieces = []
225
+ length = 5
226
+ for elt in self:
227
+ pieces.append(repr(elt))
228
+ length += len(pieces[-1]) + 2
229
+ if length > self._MAX_REPR_SIZE and len(pieces) > 2:
230
+ return "[%s, ...]" % ", ".join(pieces[:-1])
231
+ return "[%s]" % ", ".join(pieces)
232
+
233
+ def __eq__(self, other):
234
+ return type(self) == type(other) and list(self) == list(other)
235
+
236
+ def __ne__(self, other):
237
+ return not self == other
238
+
239
+ def __lt__(self, other):
240
+ if type(other) != type(self):
241
+ raise_unorderable_types("<", self, other)
242
+ return list(self) < list(other)
243
+
244
+ def __hash__(self):
245
+ """
246
+ :raise ValueError: Corpus view objects are unhashable.
247
+ """
248
+ raise ValueError("%s objects are unhashable" % self.__class__.__name__)
249
+
250
+
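As the docstring notes, a subclass only has to supply __len__() and iterate_from(); indexing, slicing, iteration, count() and index() are all inherited. A minimal sketch, assuming the module is importable as nltk.collections (the class and names below are hypothetical, for illustration only):

from nltk.collections import AbstractLazySequence

class ListBackedLazySequence(AbstractLazySequence):
    """Toy subclass backed by an ordinary in-memory list."""

    def __init__(self, data):
        self._data = list(data)

    def __len__(self):
        return len(self._data)

    def iterate_from(self, start):
        # Yield elements lazily, starting at position ``start``.
        return iter(self._data[start:])

seq = ListBackedLazySequence(range(10))
print(seq[3])          # 3, fetched via iterate_from()
print(list(seq[2:5]))  # [2, 3, 4] -- short slices come back as plain lists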
251
+ class LazySubsequence(AbstractLazySequence):
252
+ """
253
+ A subsequence produced by slicing a lazy sequence. This slice
254
+ keeps a reference to its source sequence, and generates its values
255
+ by looking them up in the source sequence.
256
+ """
257
+
258
+ MIN_SIZE = 100
259
+ """
260
+ The minimum size for which lazy slices should be created. If
261
+ ``LazySubsequence()`` is called with a subsequence that is
262
+ shorter than ``MIN_SIZE``, then a list will be returned instead.
263
+ """
264
+
265
+ def __new__(cls, source, start, stop):
266
+ """
267
+ Construct a new slice from a given underlying sequence. The
268
+ ``start`` and ``stop`` indices should be absolute indices --
269
+ i.e., they should not be negative (for indexing from the back
270
+ of a list) or greater than the length of ``source``.
271
+ """
272
+ # If the slice is small enough, just use a list.
273
+ if stop - start < cls.MIN_SIZE:
274
+ return list(islice(source.iterate_from(start), stop - start))
275
+ else:
276
+ return object.__new__(cls)
277
+
278
+ def __init__(self, source, start, stop):
279
+ self._source = source
280
+ self._start = start
281
+ self._stop = stop
282
+
283
+ def __len__(self):
284
+ return self._stop - self._start
285
+
286
+ def iterate_from(self, start):
287
+ return islice(
288
+ self._source.iterate_from(start + self._start), max(0, len(self) - start)
289
+ )
290
+
291
+
292
+ class LazyConcatenation(AbstractLazySequence):
293
+ """
294
+ A lazy sequence formed by concatenating a list of lists. This
295
+ underlying list of lists may itself be lazy. ``LazyConcatenation``
296
+ maintains an index that it uses to keep track of the relationship
297
+ between offsets in the concatenated lists and offsets in the
298
+ sublists.
299
+ """
300
+
301
+ def __init__(self, list_of_lists):
302
+ self._list = list_of_lists
303
+ self._offsets = [0]
304
+
305
+ def __len__(self):
306
+ if len(self._offsets) <= len(self._list):
307
+ for _ in self.iterate_from(self._offsets[-1]):
308
+ pass
309
+ return self._offsets[-1]
310
+
311
+ def iterate_from(self, start_index):
312
+ if start_index < self._offsets[-1]:
313
+ sublist_index = bisect.bisect_right(self._offsets, start_index) - 1
314
+ else:
315
+ sublist_index = len(self._offsets) - 1
316
+
317
+ index = self._offsets[sublist_index]
318
+
319
+ # Construct an iterator over the sublists.
320
+ if isinstance(self._list, AbstractLazySequence):
321
+ sublist_iter = self._list.iterate_from(sublist_index)
322
+ else:
323
+ sublist_iter = islice(self._list, sublist_index, None)
324
+
325
+ for sublist in sublist_iter:
326
+ if sublist_index == (len(self._offsets) - 1):
327
+ assert (
328
+ index + len(sublist) >= self._offsets[-1]
329
+ ), "offsets not monotonic increasing!"
330
+ self._offsets.append(index + len(sublist))
331
+ else:
332
+ assert self._offsets[sublist_index + 1] == index + len(
333
+ sublist
334
+ ), "inconsistent list value (num elts)"
335
+
336
+ yield from sublist[max(0, start_index - index) :]
337
+
338
+ index += len(sublist)
339
+ sublist_index += 1
340
+
341
+
342
+ class LazyMap(AbstractLazySequence):
343
+ """
344
+ A lazy sequence whose elements are formed by applying a given
345
+ function to each element in one or more underlying lists. The
346
+ function is applied lazily -- i.e., when you read a value from the
347
+ list, ``LazyMap`` will calculate that value by applying its
348
+ function to the underlying lists' value(s). ``LazyMap`` is
349
+ essentially a lazy version of the Python primitive function
350
+ ``map``. In particular, the following two expressions are
351
+ equivalent:
352
+
353
+ >>> from nltk.collections import LazyMap
354
+ >>> function = str
355
+ >>> sequence = [1,2,3]
356
+ >>> map(function, sequence) # doctest: +SKIP
357
+ ['1', '2', '3']
358
+ >>> list(LazyMap(function, sequence))
359
+ ['1', '2', '3']
360
+
361
+ Like the Python ``map`` primitive, if the source lists do not have
362
+ equal size, then the value None will be supplied for the
363
+ 'missing' elements.
364
+
365
+ Lazy maps can be useful for conserving memory, in cases where
366
+ individual values take up a lot of space. This is especially true
367
+ if the underlying list's values are constructed lazily, as is the
368
+ case with many corpus readers.
369
+
370
+ A typical example of a use case for this class is performing
371
+ feature detection on the tokens in a corpus. Since featuresets
372
+ are encoded as dictionaries, which can take up a lot of memory,
373
+ using a ``LazyMap`` can significantly reduce memory usage when
374
+ training and running classifiers.
375
+ """
376
+
377
+ def __init__(self, function, *lists, **config):
378
+ """
379
+ :param function: The function that should be applied to
380
+ elements of ``lists``. It should take as many arguments
381
+ as there are ``lists``.
382
+ :param lists: The underlying lists.
383
+ :param cache_size: Determines the size of the cache used
384
+ by this lazy map. (default=5)
385
+ """
386
+ if not lists:
387
+ raise TypeError("LazyMap requires at least two args")
388
+
389
+ self._lists = lists
390
+ self._func = function
391
+ self._cache_size = config.get("cache_size", 5)
392
+ self._cache = {} if self._cache_size > 0 else None
393
+
394
+ # If you just take bool() of sum() here _all_lazy will be true just
395
+ # in case n >= 1 list is an AbstractLazySequence. Presumably this
396
+ # isn't what's intended.
397
+ self._all_lazy = sum(
398
+ isinstance(lst, AbstractLazySequence) for lst in lists
399
+ ) == len(lists)
400
+
401
+ def iterate_from(self, index):
402
+ # Special case: one lazy sublist
403
+ if len(self._lists) == 1 and self._all_lazy:
404
+ for value in self._lists[0].iterate_from(index):
405
+ yield self._func(value)
406
+ return
407
+
408
+ # Special case: one non-lazy sublist
409
+ elif len(self._lists) == 1:
410
+ while True:
411
+ try:
412
+ yield self._func(self._lists[0][index])
413
+ except IndexError:
414
+ return
415
+ index += 1
416
+
417
+ # Special case: n lazy sublists
418
+ elif self._all_lazy:
419
+ iterators = [lst.iterate_from(index) for lst in self._lists]
420
+ while True:
421
+ elements = []
422
+ for iterator in iterators:
423
+ try:
424
+ elements.append(next(iterator))
425
+ except: # FIXME: What is this except really catching? StopIteration?
426
+ elements.append(None)
427
+ if elements == [None] * len(self._lists):
428
+ return
429
+ yield self._func(*elements)
430
+ index += 1
431
+
432
+ # general case
433
+ else:
434
+ while True:
435
+ try:
436
+ elements = [lst[index] for lst in self._lists]
437
+ except IndexError:
438
+ elements = [None] * len(self._lists)
439
+ for i, lst in enumerate(self._lists):
440
+ try:
441
+ elements[i] = lst[index]
442
+ except IndexError:
443
+ pass
444
+ if elements == [None] * len(self._lists):
445
+ return
446
+ yield self._func(*elements)
447
+ index += 1
448
+
449
+ def __getitem__(self, index):
450
+ if isinstance(index, slice):
451
+ sliced_lists = [lst[index] for lst in self._lists]
452
+ return LazyMap(self._func, *sliced_lists)
453
+ else:
454
+ # Handle negative indices
455
+ if index < 0:
456
+ index += len(self)
457
+ if index < 0:
458
+ raise IndexError("index out of range")
459
+ # Check the cache
460
+ if self._cache is not None and index in self._cache:
461
+ return self._cache[index]
462
+ # Calculate the value
463
+ try:
464
+ val = next(self.iterate_from(index))
465
+ except StopIteration as e:
466
+ raise IndexError("index out of range") from e
467
+ # Update the cache
468
+ if self._cache is not None:
469
+ if len(self._cache) > self._cache_size:
470
+ self._cache.popitem() # discard random entry
471
+ self._cache[index] = val
472
+ # Return the value
473
+ return val
474
+
475
+ def __len__(self):
476
+ return max(len(lst) for lst in self._lists)
477
+
478
+
479
+ class LazyZip(LazyMap):
480
+ """
481
+ A lazy sequence whose elements are tuples, each containing the i-th
482
+ element from each of the argument sequences. The returned list is
483
+ truncated in length to the length of the shortest argument sequence. The
484
+ tuples are constructed lazily -- i.e., when you read a value from the
485
+ list, ``LazyZip`` will calculate that value by forming a tuple from
486
+ the i-th element of each of the argument sequences.
487
+
488
+ ``LazyZip`` is essentially a lazy version of the Python primitive function
489
+ ``zip``. In particular, an evaluated LazyZip is equivalent to a zip:
490
+
491
+ >>> from nltk.collections import LazyZip
492
+ >>> sequence1, sequence2 = [1, 2, 3], ['a', 'b', 'c']
493
+ >>> zip(sequence1, sequence2) # doctest: +SKIP
494
+ [(1, 'a'), (2, 'b'), (3, 'c')]
495
+ >>> list(LazyZip(sequence1, sequence2))
496
+ [(1, 'a'), (2, 'b'), (3, 'c')]
497
+ >>> sequences = [sequence1, sequence2, [6,7,8,9]]
498
+ >>> list(zip(*sequences)) == list(LazyZip(*sequences))
499
+ True
500
+
501
+ Lazy zips can be useful for conserving memory in cases where the argument
502
+ sequences are particularly long.
503
+
504
+ A typical example of a use case for this class is combining long sequences
505
+ of gold standard and predicted values in a classification or tagging task
506
+ in order to calculate accuracy. By constructing tuples lazily and
507
+ avoiding the creation of an additional long sequence, memory usage can be
508
+ significantly reduced.
509
+ """
510
+
511
+ def __init__(self, *lists):
512
+ """
513
+ :param lists: the underlying lists
514
+ :type lists: list(list)
515
+ """
516
+ LazyMap.__init__(self, lambda *elts: elts, *lists)
517
+
518
+ def iterate_from(self, index):
519
+ iterator = LazyMap.iterate_from(self, index)
520
+ while index < len(self):
521
+ yield next(iterator)
522
+ index += 1
523
+ return
524
+
525
+ def __len__(self):
526
+ return min(len(lst) for lst in self._lists)
527
+
528
+
529
+ class LazyEnumerate(LazyZip):
530
+ """
531
+ A lazy sequence whose elements are tuples, each containing a count (from
532
+ zero) and a value yielded by underlying sequence. ``LazyEnumerate`` is
533
+ useful for obtaining an indexed list. The tuples are constructed lazily
534
+ -- i.e., when you read a value from the list, ``LazyEnumerate`` will
535
+ calculate that value by forming a tuple from the count of the i-th
536
+ element and the i-th element of the underlying sequence.
537
+
538
+ ``LazyEnumerate`` is essentially a lazy version of the Python primitive
539
+ function ``enumerate``. In particular, the following two expressions are
540
+ equivalent:
541
+
542
+ >>> from nltk.collections import LazyEnumerate
543
+ >>> sequence = ['first', 'second', 'third']
544
+ >>> list(enumerate(sequence))
545
+ [(0, 'first'), (1, 'second'), (2, 'third')]
546
+ >>> list(LazyEnumerate(sequence))
547
+ [(0, 'first'), (1, 'second'), (2, 'third')]
548
+
549
+ Lazy enumerations can be useful for conserving memory in cases where the
550
+ argument sequences are particularly long.
551
+
552
+ A typical example of a use case for this class is obtaining an indexed
553
+ list for a long sequence of values. By constructing tuples lazily and
554
+ avoiding the creation of an additional long sequence, memory usage can be
555
+ significantly reduced.
556
+ """
557
+
558
+ def __init__(self, lst):
559
+ """
560
+ :param lst: the underlying list
561
+ :type lst: list
562
+ """
563
+ LazyZip.__init__(self, range(len(lst)), lst)
564
+
565
+
566
+ class LazyIteratorList(AbstractLazySequence):
567
+ """
568
+ Wraps an iterator, loading its elements on demand
569
+ and making them subscriptable.
570
+ __repr__ displays only the first few elements.
571
+ """
572
+
573
+ def __init__(self, it, known_len=None):
574
+ self._it = it
575
+ self._len = known_len
576
+ self._cache = []
577
+
578
+ def __len__(self):
579
+ if self._len:
580
+ return self._len
581
+ for _ in self.iterate_from(len(self._cache)):
582
+ pass
583
+ self._len = len(self._cache)
584
+ return self._len
585
+
586
+ def iterate_from(self, start):
587
+ """Create a new iterator over this list starting at the given offset."""
588
+ while len(self._cache) < start:
589
+ v = next(self._it)
590
+ self._cache.append(v)
591
+ i = start
592
+ while i < len(self._cache):
593
+ yield self._cache[i]
594
+ i += 1
595
+ try:
596
+ while True:
597
+ v = next(self._it)
598
+ self._cache.append(v)
599
+ yield v
600
+ except StopIteration:
601
+ pass
602
+
603
+ def __add__(self, other):
604
+ """Return a list concatenating self with other."""
605
+ return type(self)(chain(self, other))
606
+
607
+ def __radd__(self, other):
608
+ """Return a list concatenating other with self."""
609
+ return type(self)(chain(other, self))
610
+
611
+
612
+ ######################################################################
613
+ # Trie Implementation
614
+ ######################################################################
615
+ class Trie(dict):
616
+ """A Trie implementation for strings"""
617
+
618
+ LEAF = True
619
+
620
+ def __init__(self, strings=None):
621
+ """Builds a Trie object, which is built around a ``dict``
622
+
623
+ If ``strings`` is provided, it will add the ``strings``, which
624
+ consist of a ``list`` of ``strings``, to the Trie.
625
+ Otherwise, it'll construct an empty Trie.
626
+
627
+ :param strings: List of strings to insert into the trie
628
+ (Default is ``None``)
629
+ :type strings: list(str)
630
+
631
+ """
632
+ super().__init__()
633
+ if strings:
634
+ for string in strings:
635
+ self.insert(string)
636
+
637
+ def insert(self, string):
638
+ """Inserts ``string`` into the Trie
639
+
640
+ :param string: String to insert into the trie
641
+ :type string: str
642
+
643
+ :Example:
644
+
645
+ >>> from nltk.collections import Trie
646
+ >>> trie = Trie(["abc", "def"])
647
+ >>> expected = {'a': {'b': {'c': {True: None}}}, \
648
+ 'd': {'e': {'f': {True: None}}}}
649
+ >>> trie == expected
650
+ True
651
+
652
+ """
653
+ if len(string):
654
+ self[string[0]].insert(string[1:])
655
+ else:
656
+ # mark that the string is complete
657
+ self[Trie.LEAF] = None
658
+
659
+ def __missing__(self, key):
660
+ self[key] = Trie()
661
+ return self[key]
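The lazy containers defined above are easiest to understand from a few concrete calls. The snippet below is illustrative only (it assumes this module is importable as nltk.collections) and is not part of the uploaded file:

from nltk.collections import LazyConcatenation, LazyIteratorList, Trie

concat = LazyConcatenation([[1, 2], [3], [4, 5, 6]])
print(len(concat))   # 6
print(concat[4])     # 5 -- located via the internal offset index

lazy = LazyIteratorList(iter(range(1_000_000)))
print(lazy[10])      # 10 -- only the first 11 items are drawn from the iterator

trie = Trie(["cat", "car"])
print("a" in trie["c"])  # True -- 'cat' and 'car' share the prefix 'ca'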
llmeval-env/lib/python3.10/site-packages/nltk/collocations.py ADDED
@@ -0,0 +1,412 @@
1
+ # Natural Language Toolkit: Collocations and Association Measures
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Joel Nothman <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ #
8
+ """
9
+ Tools to identify collocations --- words that often appear consecutively
10
+ --- within corpora. They may also be used to find other associations between
11
+ word occurrences.
12
+ See Manning and Schutze ch. 5 at https://nlp.stanford.edu/fsnlp/promo/colloc.pdf
13
+ and the Text::NSP Perl package at http://ngram.sourceforge.net
14
+
15
+ Finding collocations requires first calculating the frequencies of words and
16
+ their appearance in the context of other words. Often the collection of words
17
+ will then require filtering to retain only useful content terms. Each ngram
18
+ of words may then be scored according to some association measure, in order
19
+ to determine the relative likelihood of each ngram being a collocation.
20
+
21
+ The ``BigramCollocationFinder`` and ``TrigramCollocationFinder`` classes provide
22
+ these functionalities, dependent on being provided a function which scores an
23
+ ngram given appropriate frequency counts. A number of standard association
24
+ measures are provided in bigram_measures and trigram_measures.
25
+ """
26
+
27
+ # Possible TODOs:
28
+ # - consider the distinction between f(x,_) and f(x) and whether our
29
+ # approximation is good enough for fragmented data, and mention it
30
+ # - add a n-gram collocation finder with measures which only utilise n-gram
31
+ # and unigram counts (raw_freq, pmi, student_t)
32
+
33
+ import itertools as _itertools
34
+
35
+ # these two unused imports are referenced in collocations.doctest
36
+ from nltk.metrics import (
37
+ BigramAssocMeasures,
38
+ ContingencyMeasures,
39
+ QuadgramAssocMeasures,
40
+ TrigramAssocMeasures,
41
+ )
42
+ from nltk.metrics.spearman import ranks_from_scores, spearman_correlation
43
+ from nltk.probability import FreqDist
44
+ from nltk.util import ngrams
45
+
46
+
47
+ class AbstractCollocationFinder:
48
+ """
49
+ An abstract base class for collocation finders whose purpose is to
50
+ collect collocation candidate frequencies, filter and rank them.
51
+
52
+ As a minimum, collocation finders require the frequencies of each
53
+ word in a corpus, and the joint frequency of word tuples. This data
54
+ should be provided through nltk.probability.FreqDist objects or an
55
+ identical interface.
56
+ """
57
+
58
+ def __init__(self, word_fd, ngram_fd):
59
+ self.word_fd = word_fd
60
+ self.N = word_fd.N()
61
+ self.ngram_fd = ngram_fd
62
+
63
+ @classmethod
64
+ def _build_new_documents(
65
+ cls, documents, window_size, pad_left=False, pad_right=False, pad_symbol=None
66
+ ):
67
+ """
68
+ Pad the document with the placeholder according to the window_size
69
+ """
70
+ padding = (pad_symbol,) * (window_size - 1)
71
+ if pad_right:
72
+ return _itertools.chain.from_iterable(
73
+ _itertools.chain(doc, padding) for doc in documents
74
+ )
75
+ if pad_left:
76
+ return _itertools.chain.from_iterable(
77
+ _itertools.chain(padding, doc) for doc in documents
78
+ )
79
+
80
+ @classmethod
81
+ def from_documents(cls, documents):
82
+ """Constructs a collocation finder given a collection of documents,
83
+ each of which is a list (or iterable) of tokens.
84
+ """
85
+ # return cls.from_words(_itertools.chain(*documents))
86
+ return cls.from_words(
87
+ cls._build_new_documents(documents, cls.default_ws, pad_right=True)
88
+ )
89
+
90
+ @staticmethod
91
+ def _ngram_freqdist(words, n):
92
+ return FreqDist(tuple(words[i : i + n]) for i in range(len(words) - 1))
93
+
94
+ def _apply_filter(self, fn=lambda ngram, freq: False):
95
+ """Generic filter removes ngrams from the frequency distribution
96
+ if the function returns True when passed an ngram tuple.
97
+ """
98
+ tmp_ngram = FreqDist()
99
+ for ngram, freq in self.ngram_fd.items():
100
+ if not fn(ngram, freq):
101
+ tmp_ngram[ngram] = freq
102
+ self.ngram_fd = tmp_ngram
103
+
104
+ def apply_freq_filter(self, min_freq):
105
+ """Removes candidate ngrams which have frequency less than min_freq."""
106
+ self._apply_filter(lambda ng, freq: freq < min_freq)
107
+
108
+ def apply_ngram_filter(self, fn):
109
+ """Removes candidate ngrams (w1, w2, ...) where fn(w1, w2, ...)
110
+ evaluates to True.
111
+ """
112
+ self._apply_filter(lambda ng, f: fn(*ng))
113
+
114
+ def apply_word_filter(self, fn):
115
+ """Removes candidate ngrams (w1, w2, ...) where any of (fn(w1), fn(w2),
116
+ ...) evaluates to True.
117
+ """
118
+ self._apply_filter(lambda ng, f: any(fn(w) for w in ng))
119
+
120
+ def _score_ngrams(self, score_fn):
121
+ """Generates (ngram, score) pairs as determined by the scoring
122
+ function provided.
123
+ """
124
+ for tup in self.ngram_fd:
125
+ score = self.score_ngram(score_fn, *tup)
126
+ if score is not None:
127
+ yield tup, score
128
+
129
+ def score_ngrams(self, score_fn):
130
+ """Returns a sequence of (ngram, score) pairs ordered from highest to
131
+ lowest score, as determined by the scoring function provided.
132
+ """
133
+ return sorted(self._score_ngrams(score_fn), key=lambda t: (-t[1], t[0]))
134
+
135
+ def nbest(self, score_fn, n):
136
+ """Returns the top n ngrams when scored by the given function."""
137
+ return [p for p, s in self.score_ngrams(score_fn)[:n]]
138
+
139
+ def above_score(self, score_fn, min_score):
140
+ """Returns a sequence of ngrams, ordered by decreasing score, whose
141
+ scores each exceed the given minimum score.
142
+ """
143
+ for ngram, score in self.score_ngrams(score_fn):
144
+ if score > min_score:
145
+ yield ngram
146
+ else:
147
+ break
148
+
149
+
150
+ class BigramCollocationFinder(AbstractCollocationFinder):
151
+ """A tool for the finding and ranking of bigram collocations or other
152
+ association measures. It is often useful to use from_words() rather than
153
+ constructing an instance directly.
154
+ """
155
+
156
+ default_ws = 2
157
+
158
+ def __init__(self, word_fd, bigram_fd, window_size=2):
159
+ """Construct a BigramCollocationFinder, given FreqDists for
160
+ appearances of words and (possibly non-contiguous) bigrams.
161
+ """
162
+ AbstractCollocationFinder.__init__(self, word_fd, bigram_fd)
163
+ self.window_size = window_size
164
+
165
+ @classmethod
166
+ def from_words(cls, words, window_size=2):
167
+ """Construct a BigramCollocationFinder for all bigrams in the given
168
+ sequence. When window_size > 2, count non-contiguous bigrams, in the
169
+ style of Church and Hanks's (1990) association ratio.
170
+ """
171
+ wfd = FreqDist()
172
+ bfd = FreqDist()
173
+
174
+ if window_size < 2:
175
+ raise ValueError("Specify window_size at least 2")
176
+
177
+ for window in ngrams(words, window_size, pad_right=True):
178
+ w1 = window[0]
179
+ if w1 is None:
180
+ continue
181
+ wfd[w1] += 1
182
+ for w2 in window[1:]:
183
+ if w2 is not None:
184
+ bfd[(w1, w2)] += 1
185
+ return cls(wfd, bfd, window_size=window_size)
186
+
187
+ def score_ngram(self, score_fn, w1, w2):
188
+ """Returns the score for a given bigram using the given scoring
189
+ function. Following Church and Hanks (1990), counts are scaled by
190
+ a factor of 1/(window_size - 1).
191
+ """
192
+ n_all = self.N
193
+ n_ii = self.ngram_fd[(w1, w2)] / (self.window_size - 1.0)
194
+ if not n_ii:
195
+ return
196
+ n_ix = self.word_fd[w1]
197
+ n_xi = self.word_fd[w2]
198
+ return score_fn(n_ii, (n_ix, n_xi), n_all)
199
+
200
+
201
+ class TrigramCollocationFinder(AbstractCollocationFinder):
202
+ """A tool for the finding and ranking of trigram collocations or other
203
+ association measures. It is often useful to use from_words() rather than
204
+ constructing an instance directly.
205
+ """
206
+
207
+ default_ws = 3
208
+
209
+ def __init__(self, word_fd, bigram_fd, wildcard_fd, trigram_fd):
210
+ """Construct a TrigramCollocationFinder, given FreqDists for
211
+ appearances of words, bigrams, two words with any word between them,
212
+ and trigrams.
213
+ """
214
+ AbstractCollocationFinder.__init__(self, word_fd, trigram_fd)
215
+ self.wildcard_fd = wildcard_fd
216
+ self.bigram_fd = bigram_fd
217
+
218
+ @classmethod
219
+ def from_words(cls, words, window_size=3):
220
+ """Construct a TrigramCollocationFinder for all trigrams in the given
221
+ sequence.
222
+ """
223
+ if window_size < 3:
224
+ raise ValueError("Specify window_size at least 3")
225
+
226
+ wfd = FreqDist()
227
+ wildfd = FreqDist()
228
+ bfd = FreqDist()
229
+ tfd = FreqDist()
230
+ for window in ngrams(words, window_size, pad_right=True):
231
+ w1 = window[0]
232
+ if w1 is None:
233
+ continue
234
+ for w2, w3 in _itertools.combinations(window[1:], 2):
235
+ wfd[w1] += 1
236
+ if w2 is None:
237
+ continue
238
+ bfd[(w1, w2)] += 1
239
+ if w3 is None:
240
+ continue
241
+ wildfd[(w1, w3)] += 1
242
+ tfd[(w1, w2, w3)] += 1
243
+ return cls(wfd, bfd, wildfd, tfd)
244
+
245
+ def bigram_finder(self):
246
+ """Constructs a bigram collocation finder with the bigram and unigram
247
+ data from this finder. Note that this does not include any filtering
248
+ applied to this finder.
249
+ """
250
+ return BigramCollocationFinder(self.word_fd, self.bigram_fd)
251
+
252
+ def score_ngram(self, score_fn, w1, w2, w3):
253
+ """Returns the score for a given trigram using the given scoring
254
+ function.
255
+ """
256
+ n_all = self.N
257
+ n_iii = self.ngram_fd[(w1, w2, w3)]
258
+ if not n_iii:
259
+ return
260
+ n_iix = self.bigram_fd[(w1, w2)]
261
+ n_ixi = self.wildcard_fd[(w1, w3)]
262
+ n_xii = self.bigram_fd[(w2, w3)]
263
+ n_ixx = self.word_fd[w1]
264
+ n_xix = self.word_fd[w2]
265
+ n_xxi = self.word_fd[w3]
266
+ return score_fn(n_iii, (n_iix, n_ixi, n_xii), (n_ixx, n_xix, n_xxi), n_all)
267
+
268
+
269
+ class QuadgramCollocationFinder(AbstractCollocationFinder):
270
+ """A tool for the finding and ranking of quadgram collocations or other association measures.
271
+ It is often useful to use from_words() rather than constructing an instance directly.
272
+ """
273
+
274
+ default_ws = 4
275
+
276
+ def __init__(self, word_fd, quadgram_fd, ii, iii, ixi, ixxi, iixi, ixii):
277
+ """Construct a QuadgramCollocationFinder, given FreqDists for appearances of words,
278
+ bigrams, trigrams, two words separated by either one or two words, and three words
279
+ with one intervening word, in either position.
280
+ """
281
+ AbstractCollocationFinder.__init__(self, word_fd, quadgram_fd)
282
+ self.iii = iii
283
+ self.ii = ii
284
+ self.ixi = ixi
285
+ self.ixxi = ixxi
286
+ self.iixi = iixi
287
+ self.ixii = ixii
288
+
289
+ @classmethod
290
+ def from_words(cls, words, window_size=4):
291
+ if window_size < 4:
292
+ raise ValueError("Specify window_size at least 4")
293
+ ixxx = FreqDist()
294
+ iiii = FreqDist()
295
+ ii = FreqDist()
296
+ iii = FreqDist()
297
+ ixi = FreqDist()
298
+ ixxi = FreqDist()
299
+ iixi = FreqDist()
300
+ ixii = FreqDist()
301
+
302
+ for window in ngrams(words, window_size, pad_right=True):
303
+ w1 = window[0]
304
+ if w1 is None:
305
+ continue
306
+ for w2, w3, w4 in _itertools.combinations(window[1:], 3):
307
+ ixxx[w1] += 1
308
+ if w2 is None:
309
+ continue
310
+ ii[(w1, w2)] += 1
311
+ if w3 is None:
312
+ continue
313
+ iii[(w1, w2, w3)] += 1
314
+ ixi[(w1, w3)] += 1
315
+ if w4 is None:
316
+ continue
317
+ iiii[(w1, w2, w3, w4)] += 1
318
+ ixxi[(w1, w4)] += 1
319
+ ixii[(w1, w3, w4)] += 1
320
+ iixi[(w1, w2, w4)] += 1
321
+
322
+ return cls(ixxx, iiii, ii, iii, ixi, ixxi, iixi, ixii)
323
+
324
+ def score_ngram(self, score_fn, w1, w2, w3, w4):
325
+ n_all = self.N
326
+ n_iiii = self.ngram_fd[(w1, w2, w3, w4)]
327
+ if not n_iiii:
328
+ return
329
+ n_iiix = self.iii[(w1, w2, w3)]
330
+ n_xiii = self.iii[(w2, w3, w4)]
331
+ n_iixi = self.iixi[(w1, w2, w4)]
332
+ n_ixii = self.ixii[(w1, w3, w4)]
333
+
334
+ n_iixx = self.ii[(w1, w2)]
335
+ n_xxii = self.ii[(w3, w4)]
336
+ n_xiix = self.ii[(w2, w3)]
337
+ n_ixix = self.ixi[(w1, w3)]
338
+ n_ixxi = self.ixxi[(w1, w4)]
339
+ n_xixi = self.ixi[(w2, w4)]
340
+
341
+ n_ixxx = self.word_fd[w1]
342
+ n_xixx = self.word_fd[w2]
343
+ n_xxix = self.word_fd[w3]
344
+ n_xxxi = self.word_fd[w4]
345
+ return score_fn(
346
+ n_iiii,
347
+ (n_iiix, n_iixi, n_ixii, n_xiii),
348
+ (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix),
349
+ (n_ixxx, n_xixx, n_xxix, n_xxxi),
350
+ n_all,
351
+ )
352
+
353
+
354
+ def demo(scorer=None, compare_scorer=None):
355
+ """Finds bigram collocations in the files of the WebText corpus."""
356
+ from nltk.metrics import (
357
+ BigramAssocMeasures,
358
+ ranks_from_scores,
359
+ spearman_correlation,
360
+ )
361
+
362
+ if scorer is None:
363
+ scorer = BigramAssocMeasures.likelihood_ratio
364
+ if compare_scorer is None:
365
+ compare_scorer = BigramAssocMeasures.raw_freq
366
+
367
+ from nltk.corpus import stopwords, webtext
368
+
369
+ ignored_words = stopwords.words("english")
370
+ word_filter = lambda w: len(w) < 3 or w.lower() in ignored_words
371
+
372
+ for file in webtext.fileids():
373
+ words = [word.lower() for word in webtext.words(file)]
374
+
375
+ cf = BigramCollocationFinder.from_words(words)
376
+ cf.apply_freq_filter(3)
377
+ cf.apply_word_filter(word_filter)
378
+
379
+ corr = spearman_correlation(
380
+ ranks_from_scores(cf.score_ngrams(scorer)),
381
+ ranks_from_scores(cf.score_ngrams(compare_scorer)),
382
+ )
383
+ print(file)
384
+ print("\t", [" ".join(tup) for tup in cf.nbest(scorer, 15)])
385
+ print(f"\t Correlation to {compare_scorer.__name__}: {corr:0.4f}")
386
+
387
+
388
+ # Slows down loading too much
389
+ # bigram_measures = BigramAssocMeasures()
390
+ # trigram_measures = TrigramAssocMeasures()
391
+
392
+ if __name__ == "__main__":
393
+ import sys
394
+
395
+ from nltk.metrics import BigramAssocMeasures
396
+
397
+ try:
398
+ scorer = eval("BigramAssocMeasures." + sys.argv[1])
399
+ except IndexError:
400
+ scorer = None
401
+ try:
402
+ compare_scorer = eval("BigramAssocMeasures." + sys.argv[2])
403
+ except IndexError:
404
+ compare_scorer = None
405
+
406
+ demo(scorer, compare_scorer)
407
+
408
+ __all__ = [
409
+ "BigramCollocationFinder",
410
+ "TrigramCollocationFinder",
411
+ "QuadgramCollocationFinder",
412
+ ]
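Beyond the WebText demo() above, the finders work on any token sequence. A minimal, self-contained sketch (illustrative only; the toy token list is invented for this example):

from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures

tokens = "the quick brown fox jumps over the lazy dog the quick fox".split()
finder = BigramCollocationFinder.from_words(tokens)
finder.apply_freq_filter(2)                      # keep only bigrams seen at least twice
print(finder.nbest(BigramAssocMeasures.pmi, 3))  # highest-scoring bigrams, e.g. [('the', 'quick')]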
llmeval-env/lib/python3.10/site-packages/nltk/compat.py ADDED
@@ -0,0 +1,43 @@
1
+ # Natural Language Toolkit: Compatibility
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ #
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import os
9
+ from functools import wraps
10
+
11
+ # ======= Compatibility for datasets that care about Python versions ========
12
+
13
+ # The following datasets have a /PY3 subdirectory containing
14
+ # a full copy of the data which has been re-encoded or repickled.
15
+ DATA_UPDATES = [
16
+ ("chunkers", "maxent_ne_chunker"),
17
+ ("help", "tagsets"),
18
+ ("taggers", "maxent_treebank_pos_tagger"),
19
+ ("tokenizers", "punkt"),
20
+ ]
21
+
22
+ _PY3_DATA_UPDATES = [os.path.join(*path_list) for path_list in DATA_UPDATES]
23
+
24
+
25
+ def add_py3_data(path):
26
+ for item in _PY3_DATA_UPDATES:
27
+ if item in str(path) and "/PY3" not in str(path):
28
+ pos = path.index(item) + len(item)
29
+ if path[pos : pos + 4] == ".zip":
30
+ pos += 4
31
+ path = path[:pos] + "/PY3" + path[pos:]
32
+ break
33
+ return path
34
+
35
+
36
+ # for use in adding /PY3 to the second (filename) argument
37
+ # of the file pointers in data.py
38
+ def py3_data(init_func):
39
+ def _decorator(*args, **kwargs):
40
+ args = (args[0], add_py3_data(args[1])) + args[2:]
41
+ return init_func(*args, **kwargs)
42
+
43
+ return wraps(init_func)(_decorator)
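For the datasets listed in DATA_UPDATES, add_py3_data() splices a /PY3 component into the resource path. A small illustration, assuming a POSIX-style path separator (not part of the uploaded file):

from nltk.compat import add_py3_data

print(add_py3_data("corpora/brown"))
# 'corpora/brown' -- not a dataset that needs a re-encoded copy, so it is left unchanged
print(add_py3_data("tokenizers/punkt/english.pickle"))
# 'tokenizers/punkt/PY3/english.pickle' -- /PY3 inserted after the matching component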
llmeval-env/lib/python3.10/site-packages/nltk/data.py ADDED
@@ -0,0 +1,1441 @@
1
+ # Natural Language Toolkit: Utility functions
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Functions to find and load NLTK resource files, such as corpora,
10
+ grammars, and saved processing objects. Resource files are identified
11
+ using URLs, such as ``nltk:corpora/abc/rural.txt`` or
12
+ ``https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg``.
13
+ The following URL protocols are supported:
14
+
15
+ - ``file:path``: Specifies the file whose path is *path*.
16
+ Both relative and absolute paths may be used.
17
+
18
+ - ``https://host/path``: Specifies the file stored on the web
19
+ server *host* at path *path*.
20
+
21
+ - ``nltk:path``: Specifies the file stored in the NLTK data
22
+ package at *path*. NLTK will search for these files in the
23
+ directories specified by ``nltk.data.path``.
24
+
25
+ If no protocol is specified, then the default protocol ``nltk:`` will
26
+ be used.
27
+
28
+ This module provides two functions that can be used to access a
29
+ resource file, given its URL: ``load()`` loads a given resource, and
30
+ adds it to a resource cache; and ``retrieve()`` copies a given resource
31
+ to a local file.
32
+ """
33
+
34
+ import codecs
35
+ import functools
36
+ import os
37
+ import pickle
38
+ import re
39
+ import sys
40
+ import textwrap
41
+ import zipfile
42
+ from abc import ABCMeta, abstractmethod
43
+ from gzip import WRITE as GZ_WRITE
44
+ from gzip import GzipFile
45
+ from io import BytesIO, TextIOWrapper
46
+ from urllib.request import url2pathname, urlopen
47
+
48
+ try:
49
+ from zlib import Z_SYNC_FLUSH as FLUSH
50
+ except ImportError:
51
+ from zlib import Z_FINISH as FLUSH
52
+
53
+ from nltk import grammar, sem
54
+ from nltk.compat import add_py3_data, py3_data
55
+ from nltk.internals import deprecated
56
+
57
+ textwrap_indent = functools.partial(textwrap.indent, prefix=" ")
58
+
59
+ ######################################################################
60
+ # Search Path
61
+ ######################################################################
62
+
63
+ path = []
64
+ """A list of directories where the NLTK data package might reside.
65
+ These directories will be checked in order when looking for a
66
+ resource in the data package. Note that this allows users to
67
+ substitute in their own versions of resources, if they have them
68
+ (e.g., in their home directory under ~/nltk_data)."""
69
+
70
+ # User-specified locations:
71
+ _paths_from_env = os.environ.get("NLTK_DATA", "").split(os.pathsep)
72
+ path += [d for d in _paths_from_env if d]
73
+ if "APPENGINE_RUNTIME" not in os.environ and os.path.expanduser("~/") != "~/":
74
+ path.append(os.path.expanduser("~/nltk_data"))
75
+
76
+ if sys.platform.startswith("win"):
77
+ # Common locations on Windows:
78
+ path += [
79
+ os.path.join(sys.prefix, "nltk_data"),
80
+ os.path.join(sys.prefix, "share", "nltk_data"),
81
+ os.path.join(sys.prefix, "lib", "nltk_data"),
82
+ os.path.join(os.environ.get("APPDATA", "C:\\"), "nltk_data"),
83
+ r"C:\nltk_data",
84
+ r"D:\nltk_data",
85
+ r"E:\nltk_data",
86
+ ]
87
+ else:
88
+ # Common locations on UNIX & OS X:
89
+ path += [
90
+ os.path.join(sys.prefix, "nltk_data"),
91
+ os.path.join(sys.prefix, "share", "nltk_data"),
92
+ os.path.join(sys.prefix, "lib", "nltk_data"),
93
+ "/usr/share/nltk_data",
94
+ "/usr/local/share/nltk_data",
95
+ "/usr/lib/nltk_data",
96
+ "/usr/local/lib/nltk_data",
97
+ ]
98
+
99
+
100
+ ######################################################################
101
+ # Util Functions
102
+ ######################################################################
103
+
104
+
105
+ def gzip_open_unicode(
106
+ filename,
107
+ mode="rb",
108
+ compresslevel=9,
109
+ encoding="utf-8",
110
+ fileobj=None,
111
+ errors=None,
112
+ newline=None,
113
+ ):
114
+ if fileobj is None:
115
+ fileobj = GzipFile(filename, mode, compresslevel, fileobj)
116
+ return TextIOWrapper(fileobj, encoding, errors, newline)
117
+
118
+
119
+ def split_resource_url(resource_url):
120
+ """
121
+ Splits a resource url into "<protocol>:<path>".
122
+
123
+ >>> windows = sys.platform.startswith('win')
124
+ >>> split_resource_url('nltk:home/nltk')
125
+ ('nltk', 'home/nltk')
126
+ >>> split_resource_url('nltk:/home/nltk')
127
+ ('nltk', '/home/nltk')
128
+ >>> split_resource_url('file:/home/nltk')
129
+ ('file', '/home/nltk')
130
+ >>> split_resource_url('file:///home/nltk')
131
+ ('file', '/home/nltk')
132
+ >>> split_resource_url('file:///C:/home/nltk')
133
+ ('file', '/C:/home/nltk')
134
+ """
135
+ protocol, path_ = resource_url.split(":", 1)
136
+ if protocol == "nltk":
137
+ pass
138
+ elif protocol == "file":
139
+ if path_.startswith("/"):
140
+ path_ = "/" + path_.lstrip("/")
141
+ else:
142
+ path_ = re.sub(r"^/{0,2}", "", path_)
143
+ return protocol, path_
144
+
145
+
146
+ def normalize_resource_url(resource_url):
147
+ r"""
148
+ Normalizes a resource url
149
+
150
+ >>> windows = sys.platform.startswith('win')
151
+ >>> os.path.normpath(split_resource_url(normalize_resource_url('file:grammar.fcfg'))[1]) == \
152
+ ... ('\\' if windows else '') + os.path.abspath(os.path.join(os.curdir, 'grammar.fcfg'))
153
+ True
154
+ >>> not windows or normalize_resource_url('file:C:/dir/file') == 'file:///C:/dir/file'
155
+ True
156
+ >>> not windows or normalize_resource_url('file:C:\\dir\\file') == 'file:///C:/dir/file'
157
+ True
158
+ >>> not windows or normalize_resource_url('file:C:\\dir/file') == 'file:///C:/dir/file'
159
+ True
160
+ >>> not windows or normalize_resource_url('file://C:/dir/file') == 'file:///C:/dir/file'
161
+ True
162
+ >>> not windows or normalize_resource_url('file:////C:/dir/file') == 'file:///C:/dir/file'
163
+ True
164
+ >>> not windows or normalize_resource_url('nltk:C:/dir/file') == 'file:///C:/dir/file'
165
+ True
166
+ >>> not windows or normalize_resource_url('nltk:C:\\dir\\file') == 'file:///C:/dir/file'
167
+ True
168
+ >>> windows or normalize_resource_url('file:/dir/file/toy.cfg') == 'file:///dir/file/toy.cfg'
169
+ True
170
+ >>> normalize_resource_url('nltk:home/nltk')
171
+ 'nltk:home/nltk'
172
+ >>> windows or normalize_resource_url('nltk:/home/nltk') == 'file:///home/nltk'
173
+ True
174
+ >>> normalize_resource_url('https://example.com/dir/file')
175
+ 'https://example.com/dir/file'
176
+ >>> normalize_resource_url('dir/file')
177
+ 'nltk:dir/file'
178
+ """
179
+ try:
180
+ protocol, name = split_resource_url(resource_url)
181
+ except ValueError:
182
+ # the resource url has no protocol, use the nltk protocol by default
183
+ protocol = "nltk"
184
+ name = resource_url
185
+ # use file protocol if the path is an absolute path
186
+ if protocol == "nltk" and os.path.isabs(name):
187
+ protocol = "file://"
188
+ name = normalize_resource_name(name, False, None)
189
+ elif protocol == "file":
190
+ protocol = "file://"
191
+ # name is absolute
192
+ name = normalize_resource_name(name, False, None)
193
+ elif protocol == "nltk":
194
+ protocol = "nltk:"
195
+ name = normalize_resource_name(name, True)
196
+ else:
197
+ # handled by urllib
198
+ protocol += "://"
199
+ return "".join([protocol, name])
200
+
201
+
202
+ def normalize_resource_name(resource_name, allow_relative=True, relative_path=None):
203
+ """
204
+ :type resource_name: str or unicode
205
+ :param resource_name: The name of the resource to search for.
206
+ Resource names are posix-style relative path names, such as
207
+ ``corpora/brown``. Directory names will automatically
208
+ be converted to a platform-appropriate path separator.
209
+ Directory trailing slashes are preserved
210
+
211
+ >>> windows = sys.platform.startswith('win')
212
+ >>> normalize_resource_name('.', True)
213
+ './'
214
+ >>> normalize_resource_name('./', True)
215
+ './'
216
+ >>> windows or normalize_resource_name('dir/file', False, '/') == '/dir/file'
217
+ True
218
+ >>> not windows or normalize_resource_name('C:/file', False, '/') == '/C:/file'
219
+ True
220
+ >>> windows or normalize_resource_name('/dir/file', False, '/') == '/dir/file'
221
+ True
222
+ >>> windows or normalize_resource_name('../dir/file', False, '/') == '/dir/file'
223
+ True
224
+ >>> not windows or normalize_resource_name('/dir/file', True, '/') == 'dir/file'
225
+ True
226
+ >>> windows or normalize_resource_name('/dir/file', True, '/') == '/dir/file'
227
+ True
228
+ """
229
+ is_dir = bool(re.search(r"[\\/.]$", resource_name)) or resource_name.endswith(
230
+ os.path.sep
231
+ )
232
+ if sys.platform.startswith("win"):
233
+ resource_name = resource_name.lstrip("/")
234
+ else:
235
+ resource_name = re.sub(r"^/+", "/", resource_name)
236
+ if allow_relative:
237
+ resource_name = os.path.normpath(resource_name)
238
+ else:
239
+ if relative_path is None:
240
+ relative_path = os.curdir
241
+ resource_name = os.path.abspath(os.path.join(relative_path, resource_name))
242
+ resource_name = resource_name.replace("\\", "/").replace(os.path.sep, "/")
243
+ if sys.platform.startswith("win") and os.path.isabs(resource_name):
244
+ resource_name = "/" + resource_name
245
+ if is_dir and not resource_name.endswith("/"):
246
+ resource_name += "/"
247
+ return resource_name
248
+
249
+
250
+ ######################################################################
251
+ # Path Pointers
252
+ ######################################################################
253
+
254
+
255
+ class PathPointer(metaclass=ABCMeta):
256
+ """
257
+ An abstract base class for 'path pointers,' used by NLTK's data
258
+ package to identify specific paths. Two subclasses exist:
259
+ ``FileSystemPathPointer`` identifies a file that can be accessed
260
+ directly via a given absolute path. ``ZipFilePathPointer``
261
+ identifies a file contained within a zipfile, that can be accessed
262
+ by reading that zipfile.
263
+ """
264
+
265
+ @abstractmethod
266
+ def open(self, encoding=None):
267
+ """
268
+ Return a seekable read-only stream that can be used to read
269
+ the contents of the file identified by this path pointer.
270
+
271
+ :raise IOError: If the path specified by this pointer does
272
+ not contain a readable file.
273
+ """
274
+
275
+ @abstractmethod
276
+ def file_size(self):
277
+ """
278
+ Return the size of the file pointed to by this path pointer,
279
+ in bytes.
280
+
281
+ :raise IOError: If the path specified by this pointer does
282
+ not contain a readable file.
283
+ """
284
+
285
+ @abstractmethod
286
+ def join(self, fileid):
287
+ """
288
+ Return a new path pointer formed by starting at the path
289
+ identified by this pointer, and then following the relative
290
+ path given by ``fileid``. The path components of ``fileid``
291
+ should be separated by forward slashes, regardless of
292
+ the underlying file system's path separator character.
293
+ """
294
+
295
+
296
+ class FileSystemPathPointer(PathPointer, str):
297
+ """
298
+ A path pointer that identifies a file which can be accessed
299
+ directly via a given absolute path.
300
+ """
301
+
302
+ @py3_data
303
+ def __init__(self, _path):
304
+ """
305
+ Create a new path pointer for the given absolute path.
306
+
307
+ :raise IOError: If the given path does not exist.
308
+ """
309
+
310
+ _path = os.path.abspath(_path)
311
+ if not os.path.exists(_path):
312
+ raise OSError("No such file or directory: %r" % _path)
313
+ self._path = _path
314
+
315
+ # There's no need to call str.__init__(), since it's a no-op;
316
+ # str does all of its setup work in __new__.
317
+
318
+ @property
319
+ def path(self):
320
+ """The absolute path identified by this path pointer."""
321
+ return self._path
322
+
323
+ def open(self, encoding=None):
324
+ stream = open(self._path, "rb")
325
+ if encoding is not None:
326
+ stream = SeekableUnicodeStreamReader(stream, encoding)
327
+ return stream
328
+
329
+ def file_size(self):
330
+ return os.stat(self._path).st_size
331
+
332
+ def join(self, fileid):
333
+ _path = os.path.join(self._path, fileid)
334
+ return FileSystemPathPointer(_path)
335
+
336
+ def __repr__(self):
337
+ return "FileSystemPathPointer(%r)" % self._path
338
+
339
+ def __str__(self):
340
+ return self._path
341
+
342
+
343
+ @deprecated("Use gzip.GzipFile instead as it also uses a buffer.")
344
+ class BufferedGzipFile(GzipFile):
345
+ """A ``GzipFile`` subclass for compatibility with older nltk releases.
346
+
347
+ Use ``GzipFile`` directly as it also buffers in all supported
348
+ Python versions.
349
+ """
350
+
351
+ @py3_data
352
+ def __init__(
353
+ self, filename=None, mode=None, compresslevel=9, fileobj=None, **kwargs
354
+ ):
355
+ """Return a buffered gzip file object."""
356
+ GzipFile.__init__(self, filename, mode, compresslevel, fileobj)
357
+
358
+ def write(self, data):
359
+ # This is identical to GzipFile.write but does not return
360
+ # the bytes written to retain compatibility.
361
+ super().write(data)
362
+
363
+
364
+ class GzipFileSystemPathPointer(FileSystemPathPointer):
365
+ """
366
+ A subclass of ``FileSystemPathPointer`` that identifies a gzip-compressed
367
+ file located at a given absolute path. ``GzipFileSystemPathPointer`` is
368
+ appropriate for loading large gzip-compressed pickle objects efficiently.
369
+ """
370
+
371
+ def open(self, encoding=None):
372
+ stream = GzipFile(self._path, "rb")
373
+ if encoding:
374
+ stream = SeekableUnicodeStreamReader(stream, encoding)
375
+ return stream
376
+
377
+
378
+ class ZipFilePathPointer(PathPointer):
379
+ """
380
+ A path pointer that identifies a file contained within a zipfile,
381
+ which can be accessed by reading that zipfile.
382
+ """
383
+
384
+ @py3_data
385
+ def __init__(self, zipfile, entry=""):
386
+ """
387
+ Create a new path pointer pointing at the specified entry
388
+ in the given zipfile.
389
+
390
+ :raise IOError: If the given zipfile does not exist, or if it
391
+ does not contain the specified entry.
392
+ """
393
+ if isinstance(zipfile, str):
394
+ zipfile = OpenOnDemandZipFile(os.path.abspath(zipfile))
395
+
396
+ # Check that the entry exists:
397
+ if entry:
398
+
399
+ # Normalize the entry string, it should be relative:
400
+ entry = normalize_resource_name(entry, True, "/").lstrip("/")
401
+
402
+ try:
403
+ zipfile.getinfo(entry)
404
+ except Exception as e:
405
+ # Sometimes directories aren't explicitly listed in
406
+ # the zip file. So if `entry` is a directory name,
407
+ # then check if the zipfile contains any files that
408
+ # are under the given directory.
409
+ if entry.endswith("/") and [
410
+ n for n in zipfile.namelist() if n.startswith(entry)
411
+ ]:
412
+ pass # zipfile contains a file in that directory.
413
+ else:
414
+ # Otherwise, complain.
415
+ raise OSError(
416
+ f"Zipfile {zipfile.filename!r} does not contain {entry!r}"
417
+ ) from e
418
+ self._zipfile = zipfile
419
+ self._entry = entry
420
+
421
+ @property
422
+ def zipfile(self):
423
+ """
424
+ The zipfile.ZipFile object used to access the zip file
425
+ containing the entry identified by this path pointer.
426
+ """
427
+ return self._zipfile
428
+
429
+ @property
430
+ def entry(self):
431
+ """
432
+ The name of the file within zipfile that this path
433
+ pointer points to.
434
+ """
435
+ return self._entry
436
+
437
+ def open(self, encoding=None):
438
+ data = self._zipfile.read(self._entry)
439
+ stream = BytesIO(data)
440
+ if self._entry.endswith(".gz"):
441
+ stream = GzipFile(self._entry, fileobj=stream)
442
+ elif encoding is not None:
443
+ stream = SeekableUnicodeStreamReader(stream, encoding)
444
+ return stream
445
+
446
+ def file_size(self):
447
+ return self._zipfile.getinfo(self._entry).file_size
448
+
449
+ def join(self, fileid):
450
+ entry = f"{self._entry}/{fileid}"
451
+ return ZipFilePathPointer(self._zipfile, entry)
452
+
453
+ def __repr__(self):
454
+ return f"ZipFilePathPointer({self._zipfile.filename!r}, {self._entry!r})"
455
+
456
+ def __str__(self):
457
+ return os.path.normpath(os.path.join(self._zipfile.filename, self._entry))
458
+
459
+
460
+ ######################################################################
461
+ # Access Functions
462
+ ######################################################################
463
+
464
+ # Don't use a weak dictionary, because in the common case this
465
+ # causes a lot more reloading than necessary.
466
+ _resource_cache = {}
467
+ """A dictionary used to cache resources so that they won't
468
+ need to be loaded more than once."""
469
+
470
+
471
+ def find(resource_name, paths=None):
472
+ """
473
+ Find the given resource by searching through the directories and
474
+ zip files in paths, where a None or empty string specifies an absolute path.
475
+ Returns a corresponding path name. If the given resource is not
476
+ found, raise a ``LookupError``, whose message gives a pointer to
477
+ the installation instructions for the NLTK downloader.
478
+
479
+ Zip File Handling:
480
+
481
+ - If ``resource_name`` contains a component with a ``.zip``
482
+ extension, then it is assumed to be a zipfile; and the
483
+ remaining path components are used to look inside the zipfile.
484
+
485
+ - If any element of ``nltk.data.path`` has a ``.zip`` extension,
486
+ then it is assumed to be a zipfile.
487
+
488
+ - If a given resource name that does not contain any zipfile
489
+ component is not found initially, then ``find()`` will make a
490
+ second attempt to find that resource, by replacing each
491
+ component *p* in the path with *p.zip/p*. For example, this
492
+ allows ``find()`` to map the resource name
493
+ ``corpora/chat80/cities.pl`` to a zip file path pointer to
494
+ ``corpora/chat80.zip/chat80/cities.pl``.
495
+
496
+ - When using ``find()`` to locate a directory contained in a
497
+ zipfile, the resource name must end with the forward slash
498
+ character. Otherwise, ``find()`` will not locate the
499
+ directory.
500
+
501
+ :type resource_name: str or unicode
502
+ :param resource_name: The name of the resource to search for.
503
+ Resource names are posix-style relative path names, such as
504
+ ``corpora/brown``. Directory names will be
505
+ automatically converted to a platform-appropriate path separator.
506
+ :rtype: str
507
+ """
508
+ resource_name = normalize_resource_name(resource_name, True)
509
+
510
+ # Resolve default paths at runtime in-case the user overrides
511
+ # nltk.data.path
512
+ if paths is None:
513
+ paths = path
514
+
515
+ # Check if the resource name includes a zipfile name
516
+ m = re.match(r"(.*\.zip)/?(.*)$|", resource_name)
517
+ zipfile, zipentry = m.groups()
518
+
519
+ # Check each item in our path
520
+ for path_ in paths:
521
+ # Is the path item a zipfile?
522
+ if path_ and (os.path.isfile(path_) and path_.endswith(".zip")):
523
+ try:
524
+ return ZipFilePathPointer(path_, resource_name)
525
+ except OSError:
526
+ # resource not in zipfile
527
+ continue
528
+
529
+ # Is the path item a directory or is resource_name an absolute path?
530
+ elif not path_ or os.path.isdir(path_):
531
+ if zipfile is None:
532
+ p = os.path.join(path_, url2pathname(resource_name))
533
+ if os.path.exists(p):
534
+ if p.endswith(".gz"):
535
+ return GzipFileSystemPathPointer(p)
536
+ else:
537
+ return FileSystemPathPointer(p)
538
+ else:
539
+ p = os.path.join(path_, url2pathname(zipfile))
540
+ if os.path.exists(p):
541
+ try:
542
+ return ZipFilePathPointer(p, zipentry)
543
+ except OSError:
544
+ # resource not in zipfile
545
+ continue
546
+
547
+ # Fallback: if the path doesn't include a zip file, then try
548
+ # again, assuming that one of the path components is inside a
549
+ # zipfile of the same name.
550
+ if zipfile is None:
551
+ pieces = resource_name.split("/")
552
+ for i in range(len(pieces)):
553
+ modified_name = "/".join(pieces[:i] + [pieces[i] + ".zip"] + pieces[i:])
554
+ try:
555
+ return find(modified_name, paths)
556
+ except LookupError:
557
+ pass
558
+
559
+ # Identify the package (i.e. the .zip file) to download.
560
+ resource_zipname = resource_name.split("/")[1]
561
+ if resource_zipname.endswith(".zip"):
562
+ resource_zipname = resource_zipname.rpartition(".")[0]
563
+ # Display a friendly error message if the resource wasn't found:
564
+ msg = str(
565
+ "Resource \33[93m{resource}\033[0m not found.\n"
566
+ "Please use the NLTK Downloader to obtain the resource:\n\n"
567
+ "\33[31m" # To display red text in terminal.
568
+ ">>> import nltk\n"
569
+ ">>> nltk.download('{resource}')\n"
570
+ "\033[0m"
571
+ ).format(resource=resource_zipname)
572
+ msg = textwrap_indent(msg)
573
+
574
+ msg += "\n For more information see: https://www.nltk.org/data.html\n"
575
+
576
+ msg += "\n Attempted to load \33[93m{resource_name}\033[0m\n".format(
577
+ resource_name=resource_name
578
+ )
579
+
580
+ msg += "\n Searched in:" + "".join("\n - %r" % d for d in paths)
581
+ sep = "*" * 70
582
+ resource_not_found = f"\n{sep}\n{msg}\n{sep}\n"
583
+ raise LookupError(resource_not_found)
584
+
585
+
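# Editorial sketch (not part of the committed file): a typical call sequence for
# find(). The resource names are illustrative and assume the matching packages
# were already installed with nltk.download().
def _demo_find():
    # A plain directory under one of the nltk.data.path entries.
    brown_ptr = find("corpora/brown")
    # A file inside a zip archive: if "corpora/chat80/cities.pl" is not found
    # directly, find() retries with "corpora/chat80.zip/chat80/cities.pl".
    cities_ptr = find("corpora/chat80/cities.pl")
    return brown_ptr, cities_ptr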
586
+ def retrieve(resource_url, filename=None, verbose=True):
587
+ """
588
+ Copy the given resource to a local file. If no filename is
589
+ specified, then use the URL's filename. If there is already a
590
+ file named ``filename``, then raise a ``ValueError``.
591
+
592
+ :type resource_url: str
593
+ :param resource_url: A URL specifying where the resource should be
594
+ loaded from. The default protocol is "nltk:", which searches
595
+ for the file in the NLTK data package.
596
+ """
597
+ resource_url = normalize_resource_url(resource_url)
598
+ if filename is None:
599
+ if resource_url.startswith("file:"):
600
+ filename = os.path.split(resource_url)[-1]
601
+ else:
602
+ filename = re.sub(r"(^\w+:)?.*/", "", resource_url)
603
+ if os.path.exists(filename):
604
+ filename = os.path.abspath(filename)
605
+ raise ValueError("File %r already exists!" % filename)
606
+
607
+ if verbose:
608
+ print(f"Retrieving {resource_url!r}, saving to {filename!r}")
609
+
610
+ # Open the input & output streams.
611
+ infile = _open(resource_url)
612
+
613
+ # Copy infile -> outfile, using 64k blocks.
614
+ with open(filename, "wb") as outfile:
615
+ while True:
616
+ s = infile.read(1024 * 64) # 64k blocks.
617
+ outfile.write(s)
618
+ if not s:
619
+ break
620
+
621
+ infile.close()
622
+
623
+
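# Editorial sketch (not part of the committed file): retrieve() copies a resource
# into the current directory. The URL is illustrative; any "nltk:", "file:" or
# http(s) URL accepted by _open() behaves the same way.
def _demo_retrieve():
    # Saves ./sem2.fcfg, or raises ValueError if that file already exists.
    retrieve("nltk:grammars/sample_grammars/sem2.fcfg", verbose=True)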
624
+ #: A dictionary describing the formats that are supported by NLTK's
625
+ #: load() method. Keys are format names, and values are format
626
+ #: descriptions.
627
+ FORMATS = {
628
+ "pickle": "A serialized python object, stored using the pickle module.",
629
+ "json": "A serialized python object, stored using the json module.",
630
+ "yaml": "A serialized python object, stored using the yaml module.",
631
+ "cfg": "A context free grammar.",
632
+ "pcfg": "A probabilistic CFG.",
633
+ "fcfg": "A feature CFG.",
634
+ "fol": "A list of first order logic expressions, parsed with "
635
+ "nltk.sem.logic.Expression.fromstring.",
636
+ "logic": "A list of first order logic expressions, parsed with "
637
+ "nltk.sem.logic.LogicParser. Requires an additional logic_parser "
638
+ "parameter",
639
+ "val": "A semantic valuation, parsed by nltk.sem.Valuation.fromstring.",
640
+ "raw": "The raw (byte string) contents of a file.",
641
+ "text": "The raw (unicode string) contents of a file. ",
642
+ }
643
+
644
+ #: A dictionary mapping from file extensions to format names, used
645
+ #: by load() when format="auto" to decide the format for a
646
+ #: given resource url.
647
+ AUTO_FORMATS = {
648
+ "pickle": "pickle",
649
+ "json": "json",
650
+ "yaml": "yaml",
651
+ "cfg": "cfg",
652
+ "pcfg": "pcfg",
653
+ "fcfg": "fcfg",
654
+ "fol": "fol",
655
+ "logic": "logic",
656
+ "val": "val",
657
+ "txt": "text",
658
+ "text": "text",
659
+ }
660
+
661
+
662
+ def load(
663
+ resource_url,
664
+ format="auto",
665
+ cache=True,
666
+ verbose=False,
667
+ logic_parser=None,
668
+ fstruct_reader=None,
669
+ encoding=None,
670
+ ):
671
+ """
672
+ Load a given resource from the NLTK data package. The following
673
+ resource formats are currently supported:
674
+
675
+ - ``pickle``
676
+ - ``json``
677
+ - ``yaml``
678
+ - ``cfg`` (context free grammars)
679
+ - ``pcfg`` (probabilistic CFGs)
680
+ - ``fcfg`` (feature-based CFGs)
681
+ - ``fol`` (formulas of First Order Logic)
682
+ - ``logic`` (Logical formulas to be parsed by the given logic_parser)
683
+ - ``val`` (valuation of First Order Logic model)
684
+ - ``text`` (the file contents as a unicode string)
685
+ - ``raw`` (the raw file contents as a byte string)
686
+
687
+ If no format is specified, ``load()`` will attempt to determine a
688
+ format based on the resource name's file extension. If that
689
+ fails, ``load()`` will raise a ``ValueError`` exception.
690
+
691
+ For all text formats (everything except ``pickle``, ``json``, ``yaml`` and ``raw``),
692
+ it tries to decode the raw contents using UTF-8, and if that doesn't
693
+ work, it tries with ISO-8859-1 (Latin-1), unless the ``encoding``
694
+ is specified.
695
+
696
+ :type resource_url: str
697
+ :param resource_url: A URL specifying where the resource should be
698
+ loaded from. The default protocol is "nltk:", which searches
699
+ for the file in the NLTK data package.
700
+ :type cache: bool
701
+ :param cache: If true, add this resource to a cache. If load()
702
+ finds a resource in its cache, then it will return it from the
703
+ cache rather than loading it.
704
+ :type verbose: bool
705
+ :param verbose: If true, print a message when loading a resource.
706
+ Messages are not displayed when a resource is retrieved from
707
+ the cache.
708
+ :type logic_parser: LogicParser
709
+ :param logic_parser: The parser that will be used to parse logical
710
+ expressions.
711
+ :type fstruct_reader: FeatStructReader
712
+ :param fstruct_reader: The parser that will be used to parse the
713
+ feature structure of an fcfg.
714
+ :type encoding: str
715
+ :param encoding: the encoding of the input; only used for text formats.
716
+ """
717
+ resource_url = normalize_resource_url(resource_url)
718
+ resource_url = add_py3_data(resource_url)
719
+
720
+ # Determine the format of the resource.
721
+ if format == "auto":
722
+ resource_url_parts = resource_url.split(".")
723
+ ext = resource_url_parts[-1]
724
+ if ext == "gz":
725
+ ext = resource_url_parts[-2]
726
+ format = AUTO_FORMATS.get(ext)
727
+ if format is None:
728
+ raise ValueError(
729
+ "Could not determine format for %s based "
730
+ 'on its file\nextension; use the "format" '
731
+ "argument to specify the format explicitly." % resource_url
732
+ )
733
+
734
+ if format not in FORMATS:
735
+ raise ValueError(f"Unknown format type: {format}!")
736
+
737
+ # If we've cached the resource, then just return it.
738
+ if cache:
739
+ resource_val = _resource_cache.get((resource_url, format))
740
+ if resource_val is not None:
741
+ if verbose:
742
+ print(f"<<Using cached copy of {resource_url}>>")
743
+ return resource_val
744
+
745
+ # Let the user know what's going on.
746
+ if verbose:
747
+ print(f"<<Loading {resource_url}>>")
748
+
749
+ # Load the resource.
750
+ opened_resource = _open(resource_url)
751
+
752
+ if format == "raw":
753
+ resource_val = opened_resource.read()
754
+ elif format == "pickle":
755
+ resource_val = pickle.load(opened_resource)
756
+ elif format == "json":
757
+ import json
758
+
759
+ from nltk.jsontags import json_tags
760
+
761
+ resource_val = json.load(opened_resource)
762
+ tag = None
763
+ if len(resource_val) != 1:
764
+ tag = next(iter(resource_val.keys()))
765
+ if tag not in json_tags:
766
+ raise ValueError("Unknown json tag.")
767
+ elif format == "yaml":
768
+ import yaml
769
+
770
+ resource_val = yaml.safe_load(opened_resource)
771
+ else:
772
+ # The resource is a text format.
773
+ binary_data = opened_resource.read()
774
+ if encoding is not None:
775
+ string_data = binary_data.decode(encoding)
776
+ else:
777
+ try:
778
+ string_data = binary_data.decode("utf-8")
779
+ except UnicodeDecodeError:
780
+ string_data = binary_data.decode("latin-1")
781
+ if format == "text":
782
+ resource_val = string_data
783
+ elif format == "cfg":
784
+ resource_val = grammar.CFG.fromstring(string_data, encoding=encoding)
785
+ elif format == "pcfg":
786
+ resource_val = grammar.PCFG.fromstring(string_data, encoding=encoding)
787
+ elif format == "fcfg":
788
+ resource_val = grammar.FeatureGrammar.fromstring(
789
+ string_data,
790
+ logic_parser=logic_parser,
791
+ fstruct_reader=fstruct_reader,
792
+ encoding=encoding,
793
+ )
794
+ elif format == "fol":
795
+ resource_val = sem.read_logic(
796
+ string_data,
797
+ logic_parser=sem.logic.LogicParser(),
798
+ encoding=encoding,
799
+ )
800
+ elif format == "logic":
801
+ resource_val = sem.read_logic(
802
+ string_data, logic_parser=logic_parser, encoding=encoding
803
+ )
804
+ elif format == "val":
805
+ resource_val = sem.read_valuation(string_data, encoding=encoding)
806
+ else:
807
+ raise AssertionError(
808
+ "Internal NLTK error: Format %s isn't "
809
+ "handled by nltk.data.load()" % (format,)
810
+ )
811
+
812
+ opened_resource.close()
813
+
814
+ # If requested, add it to the cache.
815
+ if cache:
816
+ try:
817
+ _resource_cache[(resource_url, format)] = resource_val
818
+ # TODO: add this line
819
+ # print('<<Caching a copy of %s>>' % (resource_url,))
820
+ except TypeError:
821
+ # We can't create weak references to some object types, like
822
+ # strings and tuples. For now, just don't cache them.
823
+ pass
824
+
825
+ return resource_val
826
+
827
+
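# Editorial sketch (not part of the committed file): with format="auto", load()
# infers the format from the file extension via AUTO_FORMATS, so an .fcfg
# resource comes back as a FeatureGrammar and format="raw" returns the bytes.
def _demo_load():
    grammar_obj = load("grammars/sample_grammars/sem2.fcfg")  # inferred format: "fcfg"
    raw_bytes = load("grammars/sample_grammars/sem2.fcfg", format="raw", cache=False)
    return grammar_obj, raw_bytes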
828
+ def show_cfg(resource_url, escape="##"):
829
+ """
830
+ Write out a grammar file, ignoring escaped and empty lines.
831
+
832
+ :type resource_url: str
833
+ :param resource_url: A URL specifying where the resource should be
834
+ loaded from. The default protocol is "nltk:", which searches
835
+ for the file in the NLTK data package.
836
+ :type escape: str
837
+ :param escape: Prepended string that signals lines to be ignored
838
+ """
839
+ resource_url = normalize_resource_url(resource_url)
840
+ resource_val = load(resource_url, format="text", cache=False)
841
+ lines = resource_val.splitlines()
842
+ for l in lines:
843
+ if l.startswith(escape):
844
+ continue
845
+ if re.match("^$", l):
846
+ continue
847
+ print(l)
848
+
849
+
850
+ def clear_cache():
851
+ """
852
+ Remove all objects from the resource cache.
853
+ :see: load()
854
+ """
855
+ _resource_cache.clear()
856
+
857
+
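# Editorial sketch (not part of the committed file): how the resource cache and
# clear_cache() interact. The resource name is illustrative.
def _demo_cache():
    g1 = load("grammars/sample_grammars/sem2.fcfg")
    g2 = load("grammars/sample_grammars/sem2.fcfg", verbose=True)  # "<<Using cached copy ...>>"
    assert g1 is g2                      # same object, served from _resource_cache
    clear_cache()
    return load("grammars/sample_grammars/sem2.fcfg")  # re-read from disk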
858
+ def _open(resource_url):
859
+ """
860
+ Helper function that returns an open file object for a resource,
861
+ given its resource URL. If the given resource URL uses the "nltk:"
862
+ protocol, or uses no protocol, then use ``nltk.data.find`` to find
863
+ its path, and open it with the given mode; if the resource URL
864
+ uses the 'file' protocol, then open the file with the given mode;
865
+ otherwise, delegate to ``urllib2.urlopen``.
866
+
867
+ :type resource_url: str
868
+ :param resource_url: A URL specifying where the resource should be
869
+ loaded from. The default protocol is "nltk:", which searches
870
+ for the file in the NLTK data package.
871
+ """
872
+ resource_url = normalize_resource_url(resource_url)
873
+ protocol, path_ = split_resource_url(resource_url)
874
+
875
+ if protocol is None or protocol.lower() == "nltk":
876
+ return find(path_, path + [""]).open()
877
+ elif protocol.lower() == "file":
878
+ # urllib might not use mode='rb', so handle this one ourselves:
879
+ return find(path_, [""]).open()
880
+ else:
881
+ return urlopen(resource_url)
882
+
883
+
884
+ ######################################################################
885
+ # Lazy Resource Loader
886
+ ######################################################################
887
+
888
+
889
+ class LazyLoader:
890
+ @py3_data
891
+ def __init__(self, _path):
892
+ self._path = _path
893
+
894
+ def __load(self):
895
+ resource = load(self._path)
896
+ # This is where the magic happens! Transform ourselves into
897
+ # the object by modifying our own __dict__ and __class__ to
898
+ # match that of `resource`.
899
+ self.__dict__ = resource.__dict__
900
+ self.__class__ = resource.__class__
901
+
902
+ def __getattr__(self, attr):
903
+ self.__load()
904
+ # This looks circular, but it's not, since __load() changes our
905
+ # __class__ to something new:
906
+ return getattr(self, attr)
907
+
908
+ def __repr__(self):
909
+ self.__load()
910
+ # This looks circular, but it's not, since __load() changes our
911
+ # __class__ to something new:
912
+ return repr(self)
913
+
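# Editorial sketch (not part of the committed file): LazyLoader defers the load()
# call until the object is first used; the resource path is illustrative.
def _demo_lazy_loader():
    lazy = LazyLoader("grammars/sample_grammars/sem2.fcfg")
    # Nothing has been read yet. The first attribute access triggers __load(),
    # which swaps in the loaded resource's __dict__ and __class__, so from then
    # on `lazy` behaves exactly like the underlying FeatureGrammar.
    return lazy.start()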
914
+
915
+ ######################################################################
916
+ # Open-On-Demand ZipFile
917
+ ######################################################################
918
+
919
+
920
+ class OpenOnDemandZipFile(zipfile.ZipFile):
921
+ """
922
+ A subclass of ``zipfile.ZipFile`` that closes its file pointer
923
+ whenever it is not using it; and re-opens it when it needs to read
924
+ data from the zipfile. This is useful for reducing the number of
925
+ open file handles when many zip files are being accessed at once.
926
+ ``OpenOnDemandZipFile`` must be constructed from a filename, not a
927
+ file-like object (to allow re-opening). ``OpenOnDemandZipFile`` is
928
+ read-only (i.e., ``write()`` and ``writestr()`` are disabled).
929
+ """
930
+
931
+ @py3_data
932
+ def __init__(self, filename):
933
+ if not isinstance(filename, str):
934
+ raise TypeError("ReopenableZipFile filename must be a string")
935
+ zipfile.ZipFile.__init__(self, filename)
936
+ assert self.filename == filename
937
+ self.close()
938
+ # After closing a ZipFile object, the _fileRefCnt needs to be cleared
939
+ # for Python 2 and 3 compatible code.
940
+ self._fileRefCnt = 0
941
+
942
+ def read(self, name):
943
+ assert self.fp is None
944
+ self.fp = open(self.filename, "rb")
945
+ value = zipfile.ZipFile.read(self, name)
946
+ # Ensure that _fileRefCnt is set for Python 2 and 3 compatible code.
947
+ # Since we only opened one file here, we add 1.
948
+ self._fileRefCnt += 1
949
+ self.close()
950
+ return value
951
+
952
+ def write(self, *args, **kwargs):
953
+ """:raise NotImplementedError: OpenOnDemandZipfile is read-only"""
954
+ raise NotImplementedError("OpenOnDemandZipfile is read-only")
955
+
956
+ def writestr(self, *args, **kwargs):
957
+ """:raise NotImplementedError: OpenOnDemandZipfile is read-only"""
958
+ raise NotImplementedError("OpenOnDemandZipfile is read-only")
959
+
960
+ def __repr__(self):
961
+ return repr("OpenOnDemandZipFile(%r)" % self.filename)
962
+
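# Editorial sketch (not part of the committed file): OpenOnDemandZipFile keeps no
# file handle open between reads, so many archives can be indexed at once without
# exhausting file descriptors. The paths are illustrative.
def _demo_open_on_demand(zip_path, member):
    zf = OpenOnDemandZipFile(zip_path)  # handle is closed again right after __init__
    data = zf.read(member)              # re-opens the file, reads, then closes it
    return data                         # zf.fp stays None between calls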
963
+
964
+ ######################################################################
965
+ # Seekable Unicode Stream Reader
966
+ ######################################################################
967
+
968
+
969
+ class SeekableUnicodeStreamReader:
970
+ """
971
+ A stream reader that automatically encodes the source byte stream
972
+ into unicode (like ``codecs.StreamReader``); but still supports the
973
+ ``seek()`` and ``tell()`` operations correctly. This is in contrast
974
+ to ``codecs.StreamReader``, which provides *broken* ``seek()`` and
975
+ ``tell()`` methods.
976
+
977
+ This class was motivated by ``StreamBackedCorpusView``, which
978
+ makes extensive use of ``seek()`` and ``tell()``, and needs to be
979
+ able to handle unicode-encoded files.
980
+
981
+ Note: this class requires stateless decoders. To my knowledge,
982
+ this shouldn't cause a problem with any of python's builtin
983
+ unicode encodings.
984
+ """
985
+
986
+ DEBUG = True # : If true, then perform extra sanity checks.
987
+
988
+ @py3_data
989
+ def __init__(self, stream, encoding, errors="strict"):
990
+ # Rewind the stream to its beginning.
991
+ stream.seek(0)
992
+
993
+ self.stream = stream
994
+ """The underlying stream."""
995
+
996
+ self.encoding = encoding
997
+ """The name of the encoding that should be used to encode the
998
+ underlying stream."""
999
+
1000
+ self.errors = errors
1001
+ """The error mode that should be used when decoding data from
1002
+ the underlying stream. Can be 'strict', 'ignore', or
1003
+ 'replace'."""
1004
+
1005
+ self.decode = codecs.getdecoder(encoding)
1006
+ """The function that is used to decode byte strings into
1007
+ unicode strings."""
1008
+
1009
+ self.bytebuffer = b""
1010
+ """A buffer to use bytes that have been read but have not yet
1011
+ been decoded. This is only used when the final bytes from
1012
+ a read do not form a complete encoding for a character."""
1013
+
1014
+ self.linebuffer = None
1015
+ """A buffer used by ``readline()`` to hold characters that have
1016
+ been read, but have not yet been returned by ``read()`` or
1017
+ ``readline()``. This buffer consists of a list of unicode
1018
+ strings, where each string corresponds to a single line.
1019
+ The final element of the list may or may not be a complete
1020
+ line. Note that the existence of a linebuffer makes the
1021
+ ``tell()`` operation more complex, because it must backtrack
1022
+ to the beginning of the buffer to determine the correct
1023
+ file position in the underlying byte stream."""
1024
+
1025
+ self._rewind_checkpoint = 0
1026
+ """The file position at which the most recent read on the
1027
+ underlying stream began. This is used, together with
1028
+ ``_rewind_numchars``, to backtrack to the beginning of
1029
+ ``linebuffer`` (which is required by ``tell()``)."""
1030
+
1031
+ self._rewind_numchars = None
1032
+ """The number of characters that have been returned since the
1033
+ read that started at ``_rewind_checkpoint``. This is used,
1034
+ together with ``_rewind_checkpoint``, to backtrack to the
1035
+ beginning of ``linebuffer`` (which is required by ``tell()``)."""
1036
+
1037
+ self._bom = self._check_bom()
1038
+ """The length of the byte order marker at the beginning of
1039
+ the stream (or None for no byte order marker)."""
1040
+
1041
+ # /////////////////////////////////////////////////////////////////
1042
+ # Read methods
1043
+ # /////////////////////////////////////////////////////////////////
1044
+
1045
+ def read(self, size=None):
1046
+ """
1047
+ Read up to ``size`` bytes, decode them using this reader's
1048
+ encoding, and return the resulting unicode string.
1049
+
1050
+ :param size: The maximum number of bytes to read. If not
1051
+ specified, then read as many bytes as possible.
1052
+ :type size: int
1053
+ :rtype: unicode
1054
+ """
1055
+ chars = self._read(size)
1056
+
1057
+ # If linebuffer is not empty, then include it in the result
1058
+ if self.linebuffer:
1059
+ chars = "".join(self.linebuffer) + chars
1060
+ self.linebuffer = None
1061
+ self._rewind_numchars = None
1062
+
1063
+ return chars
1064
+
1065
+ def discard_line(self):
1066
+ if self.linebuffer and len(self.linebuffer) > 1:
1067
+ line = self.linebuffer.pop(0)
1068
+ self._rewind_numchars += len(line)
1069
+ else:
1070
+ self.stream.readline()
1071
+
1072
+ def readline(self, size=None):
1073
+ """
1074
+ Read a line of text, decode it using this reader's encoding,
1075
+ and return the resulting unicode string.
1076
+
1077
+ :param size: The maximum number of bytes to read. If no
1078
+ newline is encountered before ``size`` bytes have been read,
1079
+ then the returned value may not be a complete line of text.
1080
+ :type size: int
1081
+ """
1082
+ # If we have a non-empty linebuffer, then return the first
1083
+ # line from it. (Note that the last element of linebuffer may
1084
+ # not be a complete line; so let _read() deal with it.)
1085
+ if self.linebuffer and len(self.linebuffer) > 1:
1086
+ line = self.linebuffer.pop(0)
1087
+ self._rewind_numchars += len(line)
1088
+ return line
1089
+
1090
+ readsize = size or 72
1091
+ chars = ""
1092
+
1093
+ # If there's a remaining incomplete line in the buffer, add it.
1094
+ if self.linebuffer:
1095
+ chars += self.linebuffer.pop()
1096
+ self.linebuffer = None
1097
+
1098
+ while True:
1099
+ startpos = self.stream.tell() - len(self.bytebuffer)
1100
+ new_chars = self._read(readsize)
1101
+
1102
+ # If we're at a '\r', then read one extra character, since
1103
+ # it might be a '\n', to get the proper line ending.
1104
+ if new_chars and new_chars.endswith("\r"):
1105
+ new_chars += self._read(1)
1106
+
1107
+ chars += new_chars
1108
+ lines = chars.splitlines(True)
1109
+ if len(lines) > 1:
1110
+ line = lines[0]
1111
+ self.linebuffer = lines[1:]
1112
+ self._rewind_numchars = len(new_chars) - (len(chars) - len(line))
1113
+ self._rewind_checkpoint = startpos
1114
+ break
1115
+ elif len(lines) == 1:
1116
+ line0withend = lines[0]
1117
+ line0withoutend = lines[0].splitlines(False)[0]
1118
+ if line0withend != line0withoutend: # complete line
1119
+ line = line0withend
1120
+ break
1121
+
1122
+ if not new_chars or size is not None:
1123
+ line = chars
1124
+ break
1125
+
1126
+ # Read successively larger blocks of text.
1127
+ if readsize < 8000:
1128
+ readsize *= 2
1129
+
1130
+ return line
1131
+
1132
+ def readlines(self, sizehint=None, keepends=True):
1133
+ """
1134
+ Read this file's contents, decode them using this reader's
1135
+ encoding, and return it as a list of unicode lines.
1136
+
1137
+ :rtype: list(unicode)
1138
+ :param sizehint: Ignored.
1139
+ :param keepends: If false, then strip newlines.
1140
+ """
1141
+ return self.read().splitlines(keepends)
1142
+
1143
+ def next(self):
1144
+ """Return the next decoded line from the underlying stream."""
1145
+ line = self.readline()
1146
+ if line:
1147
+ return line
1148
+ else:
1149
+ raise StopIteration
1150
+
1151
+ def __next__(self):
1152
+ return self.next()
1153
+
1154
+ def __iter__(self):
1155
+ """Return self"""
1156
+ return self
1157
+
1158
+ def __del__(self):
1159
+ # let garbage collector deal with still opened streams
1160
+ if not self.closed:
1161
+ self.close()
1162
+
1163
+ def __enter__(self):
1164
+ return self
1165
+
1166
+ def __exit__(self, type, value, traceback):
1167
+ self.close()
1168
+
1169
+ def xreadlines(self):
1170
+ """Return self"""
1171
+ return self
1172
+
1173
+ # /////////////////////////////////////////////////////////////////
1174
+ # Pass-through methods & properties
1175
+ # /////////////////////////////////////////////////////////////////
1176
+
1177
+ @property
1178
+ def closed(self):
1179
+ """True if the underlying stream is closed."""
1180
+ return self.stream.closed
1181
+
1182
+ @property
1183
+ def name(self):
1184
+ """The name of the underlying stream."""
1185
+ return self.stream.name
1186
+
1187
+ @property
1188
+ def mode(self):
1189
+ """The mode of the underlying stream."""
1190
+ return self.stream.mode
1191
+
1192
+ def close(self):
1193
+ """
1194
+ Close the underlying stream.
1195
+ """
1196
+ self.stream.close()
1197
+
1198
+ # /////////////////////////////////////////////////////////////////
1199
+ # Seek and tell
1200
+ # /////////////////////////////////////////////////////////////////
1201
+
1202
+ def seek(self, offset, whence=0):
1203
+ """
1204
+ Move the stream to a new file position. If the reader is
1205
+ maintaining any buffers, then they will be cleared.
1206
+
1207
+ :param offset: A byte count offset.
1208
+ :param whence: If 0, then the offset is from the start of the file
1209
+ (offset should be positive), if 1, then the offset is from the
1210
+ current position (offset may be positive or negative); and if 2,
1211
+ then the offset is from the end of the file (offset should
1212
+ typically be negative).
1213
+ """
1214
+ if whence == 1:
1215
+ raise ValueError(
1216
+ "Relative seek is not supported for "
1217
+ "SeekableUnicodeStreamReader -- consider "
1218
+ "using char_seek_forward() instead."
1219
+ )
1220
+ self.stream.seek(offset, whence)
1221
+ self.linebuffer = None
1222
+ self.bytebuffer = b""
1223
+ self._rewind_numchars = None
1224
+ self._rewind_checkpoint = self.stream.tell()
1225
+
1226
+ def char_seek_forward(self, offset):
1227
+ """
1228
+ Move the read pointer forward by ``offset`` characters.
1229
+ """
1230
+ if offset < 0:
1231
+ raise ValueError("Negative offsets are not supported")
1232
+ # Clear all buffers.
1233
+ self.seek(self.tell())
1234
+ # Perform the seek operation.
1235
+ self._char_seek_forward(offset)
1236
+
1237
+ def _char_seek_forward(self, offset, est_bytes=None):
1238
+ """
1239
+ Move the file position forward by ``offset`` characters,
1240
+ ignoring all buffers.
1241
+
1242
+ :param est_bytes: A hint, giving an estimate of the number of
1243
+ bytes that will be needed to move forward by ``offset`` chars.
1244
+ Defaults to ``offset``.
1245
+ """
1246
+ if est_bytes is None:
1247
+ est_bytes = offset
1248
+ bytes = b""
1249
+
1250
+ while True:
1251
+ # Read in a block of bytes.
1252
+ newbytes = self.stream.read(est_bytes - len(bytes))
1253
+ bytes += newbytes
1254
+
1255
+ # Decode the bytes to characters.
1256
+ chars, bytes_decoded = self._incr_decode(bytes)
1257
+
1258
+ # If we got the right number of characters, then seek
1259
+ # backwards over any truncated characters, and return.
1260
+ if len(chars) == offset:
1261
+ self.stream.seek(-len(bytes) + bytes_decoded, 1)
1262
+ return
1263
+
1264
+ # If we went too far, then we can back-up until we get it
1265
+ # right, using the bytes we've already read.
1266
+ if len(chars) > offset:
1267
+ while len(chars) > offset:
1268
+ # Assume at least one byte/char.
1269
+ est_bytes += offset - len(chars)
1270
+ chars, bytes_decoded = self._incr_decode(bytes[:est_bytes])
1271
+ self.stream.seek(-len(bytes) + bytes_decoded, 1)
1272
+ return
1273
+
1274
+ # Otherwise, we haven't read enough bytes yet; loop again.
1275
+ est_bytes += offset - len(chars)
1276
+
1277
+ def tell(self):
1278
+ """
1279
+ Return the current file position on the underlying byte
1280
+ stream. If this reader is maintaining any buffers, then the
1281
+ returned file position will be the position of the beginning
1282
+ of those buffers.
1283
+ """
1284
+ # If nothing's buffered, then just return our current filepos:
1285
+ if self.linebuffer is None:
1286
+ return self.stream.tell() - len(self.bytebuffer)
1287
+
1288
+ # Otherwise, we'll need to backtrack the filepos until we
1289
+ # reach the beginning of the buffer.
1290
+
1291
+ # Store our original file position, so we can return here.
1292
+ orig_filepos = self.stream.tell()
1293
+
1294
+ # Calculate an estimate of where we think the newline is.
1295
+ bytes_read = (orig_filepos - len(self.bytebuffer)) - self._rewind_checkpoint
1296
+ buf_size = sum(len(line) for line in self.linebuffer)
1297
+ est_bytes = int(
1298
+ bytes_read * self._rewind_numchars / (self._rewind_numchars + buf_size)
1299
+ )
1300
+
1301
+ self.stream.seek(self._rewind_checkpoint)
1302
+ self._char_seek_forward(self._rewind_numchars, est_bytes)
1303
+ filepos = self.stream.tell()
1304
+
1305
+ # Sanity check
1306
+ if self.DEBUG:
1307
+ self.stream.seek(filepos)
1308
+ check1 = self._incr_decode(self.stream.read(50))[0]
1309
+ check2 = "".join(self.linebuffer)
1310
+ assert check1.startswith(check2) or check2.startswith(check1)
1311
+
1312
+ # Return to our original filepos (so we don't have to throw
1313
+ # out our buffer.)
1314
+ self.stream.seek(orig_filepos)
1315
+
1316
+ # Return the calculated filepos
1317
+ return filepos
1318
+
1319
+ # /////////////////////////////////////////////////////////////////
1320
+ # Helper methods
1321
+ # /////////////////////////////////////////////////////////////////
1322
+
1323
+ def _read(self, size=None):
1324
+ """
1325
+ Read up to ``size`` bytes from the underlying stream, decode
1326
+ them using this reader's encoding, and return the resulting
1327
+ unicode string. ``linebuffer`` is not included in the result.
1328
+ """
1329
+ if size == 0:
1330
+ return ""
1331
+
1332
+ # Skip past the byte order marker, if present.
1333
+ if self._bom and self.stream.tell() == 0:
1334
+ self.stream.read(self._bom)
1335
+
1336
+ # Read the requested number of bytes.
1337
+ if size is None:
1338
+ new_bytes = self.stream.read()
1339
+ else:
1340
+ new_bytes = self.stream.read(size)
1341
+ bytes = self.bytebuffer + new_bytes
1342
+
1343
+ # Decode the bytes into unicode characters
1344
+ chars, bytes_decoded = self._incr_decode(bytes)
1345
+
1346
+ # If we got bytes but couldn't decode any, then read further.
1347
+ if (size is not None) and (not chars) and (len(new_bytes) > 0):
1348
+ while not chars:
1349
+ new_bytes = self.stream.read(1)
1350
+ if not new_bytes:
1351
+ break # end of file.
1352
+ bytes += new_bytes
1353
+ chars, bytes_decoded = self._incr_decode(bytes)
1354
+
1355
+ # Record any bytes we didn't consume.
1356
+ self.bytebuffer = bytes[bytes_decoded:]
1357
+
1358
+ # Return the result
1359
+ return chars
1360
+
1361
+ def _incr_decode(self, bytes):
1362
+ """
1363
+ Decode the given byte string into a unicode string, using this
1364
+ reader's encoding. If an exception is encountered that
1365
+ appears to be caused by a truncation error, then just decode
1366
+ the byte string without the bytes that cause the truncation
1367
+ error.
1368
+
1369
+ Return a tuple ``(chars, num_consumed)``, where ``chars`` is
1370
+ the decoded unicode string, and ``num_consumed`` is the
1371
+ number of bytes that were consumed.
1372
+ """
1373
+ while True:
1374
+ try:
1375
+ return self.decode(bytes, "strict")
1376
+ except UnicodeDecodeError as exc:
1377
+ # If the exception occurs at the end of the string,
1378
+ # then assume that it's a truncation error.
1379
+ if exc.end == len(bytes):
1380
+ return self.decode(bytes[: exc.start], self.errors)
1381
+
1382
+ # Otherwise, if we're being strict, then raise it.
1383
+ elif self.errors == "strict":
1384
+ raise
1385
+
1386
+ # If we're not strict, then re-process it with our
1387
+ # errors setting. This *may* raise an exception.
1388
+ else:
1389
+ return self.decode(bytes, self.errors)
1390
+
1391
+ _BOM_TABLE = {
1392
+ "utf8": [(codecs.BOM_UTF8, None)],
1393
+ "utf16": [(codecs.BOM_UTF16_LE, "utf16-le"), (codecs.BOM_UTF16_BE, "utf16-be")],
1394
+ "utf16le": [(codecs.BOM_UTF16_LE, None)],
1395
+ "utf16be": [(codecs.BOM_UTF16_BE, None)],
1396
+ "utf32": [(codecs.BOM_UTF32_LE, "utf32-le"), (codecs.BOM_UTF32_BE, "utf32-be")],
1397
+ "utf32le": [(codecs.BOM_UTF32_LE, None)],
1398
+ "utf32be": [(codecs.BOM_UTF32_BE, None)],
1399
+ }
1400
+
1401
+ def _check_bom(self):
1402
+ # Normalize our encoding name
1403
+ enc = re.sub("[ -]", "", self.encoding.lower())
1404
+
1405
+ # Look up our encoding in the BOM table.
1406
+ bom_info = self._BOM_TABLE.get(enc)
1407
+
1408
+ if bom_info:
1409
+ # Read a prefix, to check against the BOM(s)
1410
+ bytes = self.stream.read(16)
1411
+ self.stream.seek(0)
1412
+
1413
+ # Check for each possible BOM.
1414
+ for (bom, new_encoding) in bom_info:
1415
+ if bytes.startswith(bom):
1416
+ if new_encoding:
1417
+ self.encoding = new_encoding
1418
+ return len(bom)
1419
+
1420
+ return None
1421
+
1422
+
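# Editorial sketch (not part of the committed file): unlike codecs.StreamReader,
# tell() here always reports a byte offset that seek() can safely return to, even
# when readline() has buffered ahead.
def _demo_seekable_reader():
    raw = BytesIO("first line\nsecond line\nthird line\n".encode("utf-8"))
    reader = SeekableUnicodeStreamReader(raw, "utf-8")
    reader.readline()         # 'first line\n'
    pos = reader.tell()       # byte offset of the start of 'second line'
    reader.readline()         # 'second line\n'
    reader.seek(pos)
    return reader.readline()  # 'second line\n' again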
1423
+ __all__ = [
1424
+ "path",
1425
+ "PathPointer",
1426
+ "FileSystemPathPointer",
1427
+ "BufferedGzipFile",
1428
+ "GzipFileSystemPathPointer",
1430
+ "find",
1431
+ "retrieve",
1432
+ "FORMATS",
1433
+ "AUTO_FORMATS",
1434
+ "load",
1435
+ "show_cfg",
1436
+ "clear_cache",
1437
+ "LazyLoader",
1438
+ "OpenOnDemandZipFile",
1439
+ "GzipFileSystemPathPointer",
1440
+ "SeekableUnicodeStreamReader",
1441
+ ]
llmeval-env/lib/python3.10/site-packages/nltk/decorators.py ADDED
@@ -0,0 +1,251 @@
1
+ """
2
+ Decorator module by Michele Simionato <[email protected]>
3
+ Copyright Michele Simionato, distributed under the terms of the BSD License (see below).
4
+ http://www.phyast.pitt.edu/~micheles/python/documentation.html
5
+
6
+ Included in NLTK for its support of a nice memoization decorator.
7
+ """
8
+
9
+ __docformat__ = "restructuredtext en"
10
+
11
+ ## The basic trick is to generate the source code for the decorated function
12
+ ## with the right signature and to evaluate it.
13
+ ## Uncomment the statement 'print >> sys.stderr, func_src' in _decorator
14
+ ## to understand what is going on.
15
+
16
+ __all__ = ["decorator", "new_wrapper", "getinfo"]
17
+
18
+ import sys
19
+
20
+ # Hack to keep NLTK's "tokenize" module from colliding with the "tokenize" in
21
+ # the Python standard library.
22
+ OLD_SYS_PATH = sys.path[:]
23
+ sys.path = [p for p in sys.path if p and "nltk" not in str(p)]
24
+ import inspect
25
+
26
+ sys.path = OLD_SYS_PATH
27
+
28
+
29
+ def __legacysignature(signature):
30
+ """
31
+ For retrocompatibility reasons, we don't use a standard Signature.
32
+ Instead, we use the string generated by this method.
33
+ Basically, from a Signature we create a string and remove the default values.
34
+ """
35
+ listsignature = str(signature)[1:-1].split(",")
36
+ for counter, param in enumerate(listsignature):
37
+ if param.count("=") > 0:
38
+ listsignature[counter] = param[0 : param.index("=")].strip()
39
+ else:
40
+ listsignature[counter] = param.strip()
41
+ return ", ".join(listsignature)
42
+
43
+
44
+ def getinfo(func):
45
+ """
46
+ Returns an info dictionary containing:
47
+ - name (the name of the function : str)
48
+ - argnames (the names of the arguments : list)
49
+ - defaults (the values of the default arguments : tuple)
50
+ - signature (the signature : str)
51
+ - fullsignature (the full signature : Signature)
52
+ - doc (the docstring : str)
53
+ - module (the module name : str)
54
+ - dict (the function __dict__ : str)
55
+
56
+ >>> def f(self, x=1, y=2, *args, **kw): pass
57
+
58
+ >>> info = getinfo(f)
59
+
60
+ >>> info["name"]
61
+ 'f'
62
+ >>> info["argnames"]
63
+ ['self', 'x', 'y', 'args', 'kw']
64
+
65
+ >>> info["defaults"]
66
+ (1, 2)
67
+
68
+ >>> info["signature"]
69
+ 'self, x, y, *args, **kw'
70
+
71
+ >>> info["fullsignature"]
72
+ <Signature (self, x=1, y=2, *args, **kw)>
73
+ """
74
+ assert inspect.ismethod(func) or inspect.isfunction(func)
75
+ argspec = inspect.getfullargspec(func)
76
+ regargs, varargs, varkwargs = argspec[:3]
77
+ argnames = list(regargs)
78
+ if varargs:
79
+ argnames.append(varargs)
80
+ if varkwargs:
81
+ argnames.append(varkwargs)
82
+ fullsignature = inspect.signature(func)
83
+ # Convert Signature to str
84
+ signature = __legacysignature(fullsignature)
85
+
86
+ # pypy compatibility
87
+ if hasattr(func, "__closure__"):
88
+ _closure = func.__closure__
89
+ _globals = func.__globals__
90
+ else:
91
+ _closure = func.func_closure
92
+ _globals = func.func_globals
93
+
94
+ return dict(
95
+ name=func.__name__,
96
+ argnames=argnames,
97
+ signature=signature,
98
+ fullsignature=fullsignature,
99
+ defaults=func.__defaults__,
100
+ doc=func.__doc__,
101
+ module=func.__module__,
102
+ dict=func.__dict__,
103
+ globals=_globals,
104
+ closure=_closure,
105
+ )
106
+
107
+
108
+ def update_wrapper(wrapper, model, infodict=None):
109
+ "akin to functools.update_wrapper"
110
+ infodict = infodict or getinfo(model)
111
+ wrapper.__name__ = infodict["name"]
112
+ wrapper.__doc__ = infodict["doc"]
113
+ wrapper.__module__ = infodict["module"]
114
+ wrapper.__dict__.update(infodict["dict"])
115
+ wrapper.__defaults__ = infodict["defaults"]
116
+ wrapper.undecorated = model
117
+ return wrapper
118
+
119
+
120
+ def new_wrapper(wrapper, model):
121
+ """
122
+ An improvement over functools.update_wrapper. The wrapper is a generic
123
+ callable object. It works by generating a copy of the wrapper with the
124
+ right signature and by updating the copy, not the original.
125
+ Moreover, 'model' can be a dictionary with keys 'name', 'doc', 'module',
126
+ 'dict', 'defaults'.
127
+ """
128
+ if isinstance(model, dict):
129
+ infodict = model
130
+ else: # assume model is a function
131
+ infodict = getinfo(model)
132
+ assert (
133
+ not "_wrapper_" in infodict["argnames"]
134
+ ), '"_wrapper_" is a reserved argument name!'
135
+ src = "lambda %(signature)s: _wrapper_(%(signature)s)" % infodict
136
+ funcopy = eval(src, dict(_wrapper_=wrapper))
137
+ return update_wrapper(funcopy, model, infodict)
138
+
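# Editorial sketch (not part of the original module): new_wrapper() returns a copy
# of `wrapper` that exposes the *model's* signature, which is what distinguishes
# it from functools.update_wrapper.
def _demo_new_wrapper():
    def model(x, y=2):
        "add two numbers"
        return x + y

    def wrapper(*args, **kw):  # generic callable with an opaque signature
        return model(*args, **kw)

    wrapped = new_wrapper(wrapper, model)
    return wrapped.__name__, getinfo(wrapped)["signature"]  # ('model', 'x, y')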
139
+
140
+ # helper used in decorator_factory
141
+ def __call__(self, func):
142
+ return new_wrapper(lambda *a, **k: self.call(func, *a, **k), func)
143
+
144
+
145
+ def decorator_factory(cls):
146
+ """
147
+ Take a class with a ``.caller`` method and return a callable decorator
148
+ object. It works by adding a suitable __call__ method to the class;
149
+ it raises a TypeError if the class already has a nontrivial __call__
150
+ method.
151
+ """
152
+ attrs = set(dir(cls))
153
+ if "__call__" in attrs:
154
+ raise TypeError(
155
+ "You cannot decorate a class with a nontrivial " "__call__ method"
156
+ )
157
+ if "call" not in attrs:
158
+ raise TypeError("You cannot decorate a class without a " ".call method")
159
+ cls.__call__ = __call__
160
+ return cls
161
+
162
+
163
+ def decorator(caller):
164
+ """
165
+ General purpose decorator factory: takes a caller function as
166
+ input and returns a decorator with the same attributes.
167
+ A caller function is any function like this::
168
+
169
+ def caller(func, *args, **kw):
170
+ # do something
171
+ return func(*args, **kw)
172
+
173
+ Here is an example of usage:
174
+
175
+ >>> @decorator
176
+ ... def chatty(f, *args, **kw):
177
+ ... print("Calling %r" % f.__name__)
178
+ ... return f(*args, **kw)
179
+
180
+ >>> chatty.__name__
181
+ 'chatty'
182
+
183
+ >>> @chatty
184
+ ... def f(): pass
185
+ ...
186
+ >>> f()
187
+ Calling 'f'
188
+
189
+ decorator can also take as input a class with a .caller method; in this
190
+ case it converts the class into a factory of callable decorator objects.
191
+ See the documentation for an example.
192
+ """
193
+ if inspect.isclass(caller):
194
+ return decorator_factory(caller)
195
+
196
+ def _decorator(func): # the real meat is here
197
+ infodict = getinfo(func)
198
+ argnames = infodict["argnames"]
199
+ assert not (
200
+ "_call_" in argnames or "_func_" in argnames
201
+ ), "You cannot use _call_ or _func_ as argument names!"
202
+ src = "lambda %(signature)s: _call_(_func_, %(signature)s)" % infodict
203
+ # import sys; print >> sys.stderr, src # for debugging purposes
204
+ dec_func = eval(src, dict(_func_=func, _call_=caller))
205
+ return update_wrapper(dec_func, func, infodict)
206
+
207
+ return update_wrapper(_decorator, caller)
208
+
209
+
210
+ def getattr_(obj, name, default_thunk):
211
+ "Similar to .setdefault in dictionaries."
212
+ try:
213
+ return getattr(obj, name)
214
+ except AttributeError:
215
+ default = default_thunk()
216
+ setattr(obj, name, default)
217
+ return default
218
+
219
+
220
+ @decorator
221
+ def memoize(func, *args):
222
+ dic = getattr_(func, "memoize_dic", dict)
223
+ # memoize_dic is created at the first call
224
+ if args in dic:
225
+ return dic[args]
226
+ result = func(*args)
227
+ dic[args] = result
228
+ return result
229
+
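# Editorial sketch (not part of the original module): memoize caches results keyed
# on the positional arguments, so each distinct n below is computed only once and
# _fib(30) runs in linear time. The cache is stored as ``memoize_dic`` on the
# undecorated function (reachable via _fib.undecorated.memoize_dic).
@memoize
def _fib(n):
    "n-th Fibonacci number, memoized"
    return n if n < 2 else _fib(n - 1) + _fib(n - 2)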
230
+
231
+ ########################## LEGALESE ###############################
232
+
233
+ ## Redistributions of source code must retain the above copyright
234
+ ## notice, this list of conditions and the following disclaimer.
235
+ ## Redistributions in bytecode form must reproduce the above copyright
236
+ ## notice, this list of conditions and the following disclaimer in
237
+ ## the documentation and/or other materials provided with the
238
+ ## distribution.
239
+
240
+ ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
241
+ ## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
242
+ ## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
243
+ ## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
244
+ ## HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
245
+ ## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
246
+ ## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
247
+ ## OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
248
+ ## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
249
+ ## TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
250
+ ## USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
251
+ ## DAMAGE.
llmeval-env/lib/python3.10/site-packages/nltk/downloader.py ADDED
@@ -0,0 +1,2559 @@
1
+ # Natural Language Toolkit: Corpus & Model Downloader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ The NLTK corpus and module downloader. This module defines several
10
+ interfaces which can be used to download corpora, models, and other
11
+ data packages that can be used with NLTK.
12
+
13
+ Downloading Packages
14
+ ====================
15
+ If called with no arguments, ``download()`` will display an interactive
16
+ interface which can be used to download and install new packages.
17
+ If Tkinter is available, then a graphical interface will be shown,
18
+ otherwise a simple text interface will be provided.
19
+
20
+ Individual packages can be downloaded by calling the ``download()``
21
+ function with a single argument, giving the package identifier for the
22
+ package that should be downloaded:
23
+
24
+ >>> download('treebank') # doctest: +SKIP
25
+ [nltk_data] Downloading package 'treebank'...
26
+ [nltk_data] Unzipping corpora/treebank.zip.
27
+
28
+ NLTK also provides a number of \"package collections\", consisting of
29
+ a group of related packages. To download all packages in a
30
+ collection, simply call ``download()`` with the collection's
31
+ identifier:
32
+
33
+ >>> download('all-corpora') # doctest: +SKIP
34
+ [nltk_data] Downloading package 'abc'...
35
+ [nltk_data] Unzipping corpora/abc.zip.
36
+ [nltk_data] Downloading package 'alpino'...
37
+ [nltk_data] Unzipping corpora/alpino.zip.
38
+ ...
39
+ [nltk_data] Downloading package 'words'...
40
+ [nltk_data] Unzipping corpora/words.zip.
41
+
42
+ Download Directory
43
+ ==================
44
+ By default, packages are installed in either a system-wide directory
45
+ (if Python has sufficient access to write to it); or in the current
46
+ user's home directory. However, the ``download_dir`` argument may be
47
+ used to specify a different installation target, if desired.
48
+
49
+ See ``Downloader.default_download_dir()`` for a more detailed
50
+ description of how the default download directory is chosen.
51
+
52
+ NLTK Download Server
53
+ ====================
54
+ Before downloading any packages, the corpus and module downloader
55
+ contacts the NLTK download server, to retrieve an index file
56
+ describing the available packages. By default, this index file is
57
+ loaded from ``https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml``.
58
+ If necessary, it is possible to create a new ``Downloader`` object,
59
+ specifying a different URL for the package index file.
60
+
61
+ Usage::
62
+
63
+ python nltk/downloader.py [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS
64
+
65
+ or::
66
+
67
+ python -m nltk.downloader [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS
68
+ """
69
+ # ----------------------------------------------------------------------
70
+
71
+ """
72
+
73
+ 0 1 2 3
74
+ [label][----][label][----]
75
+ [column ][column ]
76
+
77
+ Notes
78
+ =====
79
+ Handling data files.. Some questions:
80
+
81
+ * Should the data files be kept zipped or unzipped? I say zipped.
82
+
83
+ * Should the data files be kept in svn at all? Advantages: history;
84
+ automatic version numbers; 'svn up' could be used rather than the
85
+ downloader to update the corpora. Disadvantages: they're big,
86
+ which makes working from svn a bit of a pain. And we're planning
87
+ to potentially make them much bigger. I don't think we want
88
+ people to have to download 400MB corpora just to use nltk from svn.
89
+
90
+ * Compromise: keep the data files in trunk/data rather than in
91
+ trunk/nltk. That way you can check them out in svn if you want
92
+ to; but you don't need to, and you can use the downloader instead.
93
+
94
+ * Also: keep models in mind. When we change the code, we'd
95
+ potentially like the models to get updated. This could require a
96
+ little thought.
97
+
98
+ * So.. let's assume we have a trunk/data directory, containing a bunch
99
+ of packages. The packages should be kept as zip files, because we
100
+ really shouldn't be editing them much (well -- we may edit models
101
+ more, but they tend to be binary-ish files anyway, where diffs
102
+ aren't that helpful). So we'll have trunk/data, with a bunch of
103
+ files like abc.zip and treebank.zip and propbank.zip. For each
104
+ package we could also have eg treebank.xml and propbank.xml,
105
+ describing the contents of the package (name, copyright, license,
106
+ etc). Collections would also have .xml files. Finally, we would
107
+ pull all these together to form a single index.xml file. Some
108
+ directory structure wouldn't hurt. So how about::
109
+
110
+ /trunk/data/ ....................... root of data svn
111
+ index.xml ........................ main index file
112
+ src/ ............................. python scripts
113
+ packages/ ........................ dir for packages
114
+ corpora/ ....................... zip & xml files for corpora
115
+ grammars/ ...................... zip & xml files for grammars
116
+ taggers/ ....................... zip & xml files for taggers
117
+ tokenizers/ .................... zip & xml files for tokenizers
118
+ etc.
119
+ collections/ ..................... xml files for collections
120
+
121
+ Where the root (/trunk/data) would contain a makefile; and src/
122
+ would contain a script to update the info.xml file. It could also
123
+ contain scripts to rebuild some of the various model files. The
124
+ script that builds index.xml should probably check that each zip
125
+ file expands entirely into a single subdir, whose name matches the
126
+ package's uid.
127
+
128
+ Changes I need to make:
129
+ - in index: change "size" to "filesize" or "compressed-size"
130
+ - in index: add "unzipped-size"
131
+ - when checking status: check both compressed & uncompressed size.
132
+ uncompressed size is important to make sure we detect a problem
133
+ if something got partially unzipped. define new status values
134
+ to differentiate stale vs corrupt vs corruptly-uncompressed??
135
+ (we shouldn't need to re-download the file if the zip file is ok
136
+ but it didn't get uncompressed fully.)
137
+ - add other fields to the index: author, license, copyright, contact,
138
+ etc.
139
+
140
+ the current grammars/ package would become a single new package (eg
141
+ toy-grammars or book-grammars).
142
+
143
+ xml file should have:
144
+ - authorship info
145
+ - license info
146
+ - copyright info
147
+ - contact info
148
+ - info about what type of data/annotation it contains?
149
+ - recommended corpus reader?
150
+
151
+ collections can contain other collections. they can also contain
152
+ multiple package types (corpora & models). Have a single 'basics'
153
+ package that includes everything we talk about in the book?
154
+
155
+ n.b.: there will have to be a fallback to the punkt tokenizer, in case
156
+ they didn't download that model.
157
+
158
+ default: unzip or not?
159
+
160
+ """
161
+ import functools
162
+ import itertools
163
+ import os
164
+ import shutil
165
+ import subprocess
166
+ import sys
167
+ import textwrap
168
+ import threading
169
+ import time
170
+ import warnings
171
+ import zipfile
172
+ from hashlib import md5
173
+ from xml.etree import ElementTree
174
+
175
+ try:
176
+ TKINTER = True
177
+ from tkinter import Button, Canvas, Entry, Frame, IntVar, Label, Menu, TclError, Tk
178
+ from tkinter.messagebox import showerror
179
+
180
+ from nltk.draw.table import Table
181
+ from nltk.draw.util import ShowText
182
+ except ImportError:
183
+ TKINTER = False
184
+ TclError = ValueError
185
+
186
+ from urllib.error import HTTPError, URLError
187
+ from urllib.request import urlopen
188
+
189
+ import nltk
190
+
191
+ # urllib2 = nltk.internals.import_from_stdlib('urllib2')
192
+
193
+
194
+ ######################################################################
195
+ # Directory entry objects (from the data server's index file)
196
+ ######################################################################
197
+
198
+
199
+ class Package:
200
+ """
201
+ A directory entry for a downloadable package. These entries are
202
+ extracted from the XML index file that is downloaded by
203
+ ``Downloader``. Each package consists of a single file; but if
204
+ that file is a zip file, then it can be automatically decompressed
205
+ when the package is installed.
206
+ """
207
+
208
+ def __init__(
209
+ self,
210
+ id,
211
+ url,
212
+ name=None,
213
+ subdir="",
214
+ size=None,
215
+ unzipped_size=None,
216
+ checksum=None,
217
+ svn_revision=None,
218
+ copyright="Unknown",
219
+ contact="Unknown",
220
+ license="Unknown",
221
+ author="Unknown",
222
+ unzip=True,
223
+ **kw,
224
+ ):
225
+ self.id = id
226
+ """A unique identifier for this package."""
227
+
228
+ self.name = name or id
229
+ """A string name for this package."""
230
+
231
+ self.subdir = subdir
232
+ """The subdirectory where this package should be installed.
233
+ E.g., ``'corpora'`` or ``'taggers'``."""
234
+
235
+ self.url = url
236
+ """A URL that can be used to download this package's file."""
237
+
238
+ self.size = int(size)
239
+ """The filesize (in bytes) of the package file."""
240
+
241
+ self.unzipped_size = int(unzipped_size)
242
+ """The total filesize of the files contained in the package's
243
+ zipfile."""
244
+
245
+ self.checksum = checksum
246
+ """The MD-5 checksum of the package file."""
247
+
248
+ self.svn_revision = svn_revision
249
+ """A subversion revision number for this package."""
250
+
251
+ self.copyright = copyright
252
+ """Copyright holder for this package."""
253
+
254
+ self.contact = contact
255
+ """Name & email of the person who should be contacted with
256
+ questions about this package."""
257
+
258
+ self.license = license
259
+ """License information for this package."""
260
+
261
+ self.author = author
262
+ """Author of this package."""
263
+
264
+ ext = os.path.splitext(url.split("/")[-1])[1]
265
+ self.filename = os.path.join(subdir, id + ext)
266
+ """The filename that should be used for this package's file. It
267
+ is formed by joining ``self.subdir`` with ``self.id``, and
268
+ using the same extension as ``url``."""
269
+
270
+ self.unzip = bool(int(unzip)) # '0' or '1'
271
+ """A flag indicating whether this corpus should be unzipped by
272
+ default."""
273
+
274
+ # Include any other attributes provided by the XML file.
275
+ self.__dict__.update(kw)
276
+
277
+ @staticmethod
278
+ def fromxml(xml):
279
+ if isinstance(xml, str):
280
+ xml = ElementTree.parse(xml)
281
+ for key in xml.attrib:
282
+ xml.attrib[key] = str(xml.attrib[key])
283
+ return Package(**xml.attrib)
284
+
285
+ def __lt__(self, other):
286
+ return self.id < other.id
287
+
288
+ def __repr__(self):
289
+ return "<Package %s>" % self.id
290
+
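+ # Example (illustrative): building a Package from a minimal XML element.
+ # The attribute values below are made up for demonstration; real entries
+ # come from the data server's index.xml.
+ #
+ #     elt = ElementTree.fromstring(
+ #         '<package id="punkt" url="https://example.org/punkt.zip" '
+ #         'subdir="tokenizers" size="13" unzipped_size="42" unzip="1"/>'
+ #     )
+ #     Package.fromxml(elt)       # -> <Package punkt>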
291
+
292
+ class Collection:
293
+ """
294
+ A directory entry for a collection of downloadable packages.
295
+ These entries are extracted from the XML index file that is
296
+ downloaded by ``Downloader``.
297
+ """
298
+
299
+ def __init__(self, id, children, name=None, **kw):
300
+ self.id = id
301
+ """A unique identifier for this collection."""
302
+
303
+ self.name = name or id
304
+ """A string name for this collection."""
305
+
306
+ self.children = children
307
+ """A list of the ``Collections`` or ``Packages`` directly
308
+ contained by this collection."""
309
+
310
+ self.packages = None
311
+ """A list of ``Packages`` contained by this collection or any
312
+ collections it recursively contains."""
313
+
314
+ # Include any other attributes provided by the XML file.
315
+ self.__dict__.update(kw)
316
+
317
+ @staticmethod
318
+ def fromxml(xml):
319
+ if isinstance(xml, str):
320
+ xml = ElementTree.parse(xml)
321
+ for key in xml.attrib:
322
+ xml.attrib[key] = str(xml.attrib[key])
323
+ children = [child.get("ref") for child in xml.findall("item")]
324
+ return Collection(children=children, **xml.attrib)
325
+
326
+ def __lt__(self, other):
327
+ return self.id < other.id
328
+
329
+ def __repr__(self):
330
+ return "<Collection %s>" % self.id
331
+
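+ # Example (illustrative): a Collection is read from an XML element whose
+ # <item ref="..."/> children name its members; the ids here are made up.
+ # Collection.children holds the raw id strings until
+ # Downloader._update_index() replaces them with Package/Collection objects.
+ #
+ #     elt = ElementTree.fromstring(
+ #         '<collection id="toy-grammars" name="Toy grammars">'
+ #         '<item ref="grammar-a"/><item ref="grammar-b"/></collection>'
+ #     )
+ #     Collection.fromxml(elt)    # -> <Collection toy-grammars>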
332
+
333
+ ######################################################################
334
+ # Message Passing Objects
335
+ ######################################################################
336
+
337
+
338
+ class DownloaderMessage:
339
+ """A status message object, used by ``incr_download`` to
340
+ communicate its progress."""
341
+
342
+
343
+ class StartCollectionMessage(DownloaderMessage):
344
+ """Data server has started working on a collection of packages."""
345
+
346
+ def __init__(self, collection):
347
+ self.collection = collection
348
+
349
+
350
+ class FinishCollectionMessage(DownloaderMessage):
351
+ """Data server has finished working on a collection of packages."""
352
+
353
+ def __init__(self, collection):
354
+ self.collection = collection
355
+
356
+
357
+ class StartPackageMessage(DownloaderMessage):
358
+ """Data server has started working on a package."""
359
+
360
+ def __init__(self, package):
361
+ self.package = package
362
+
363
+
364
+ class FinishPackageMessage(DownloaderMessage):
365
+ """Data server has finished working on a package."""
366
+
367
+ def __init__(self, package):
368
+ self.package = package
369
+
370
+
371
+ class StartDownloadMessage(DownloaderMessage):
372
+ """Data server has started downloading a package."""
373
+
374
+ def __init__(self, package):
375
+ self.package = package
376
+
377
+
378
+ class FinishDownloadMessage(DownloaderMessage):
379
+ """Data server has finished downloading a package."""
380
+
381
+ def __init__(self, package):
382
+ self.package = package
383
+
384
+
385
+ class StartUnzipMessage(DownloaderMessage):
386
+ """Data server has started unzipping a package."""
387
+
388
+ def __init__(self, package):
389
+ self.package = package
390
+
391
+
392
+ class FinishUnzipMessage(DownloaderMessage):
393
+ """Data server has finished unzipping a package."""
394
+
395
+ def __init__(self, package):
396
+ self.package = package
397
+
398
+
399
+ class UpToDateMessage(DownloaderMessage):
400
+ """The package download file is already up-to-date"""
401
+
402
+ def __init__(self, package):
403
+ self.package = package
404
+
405
+
406
+ class StaleMessage(DownloaderMessage):
407
+ """The package download file is out-of-date or corrupt"""
408
+
409
+ def __init__(self, package):
410
+ self.package = package
411
+
412
+
413
+ class ErrorMessage(DownloaderMessage):
414
+ """Data server encountered an error"""
415
+
416
+ def __init__(self, package, message):
417
+ self.package = package
418
+ if isinstance(message, Exception):
419
+ self.message = str(message)
420
+ else:
421
+ self.message = message
422
+
423
+
424
+ class ProgressMessage(DownloaderMessage):
425
+ """Indicates how much progress the data server has made"""
426
+
427
+ def __init__(self, progress):
428
+ self.progress = progress
429
+
430
+
431
+ class SelectDownloadDirMessage(DownloaderMessage):
432
+ """Indicates what download directory the data server is using"""
433
+
434
+ def __init__(self, download_dir):
435
+ self.download_dir = download_dir
436
+
437
+
438
+ ######################################################################
439
+ # NLTK Data Server
440
+ ######################################################################
441
+
442
+
443
+ class Downloader:
444
+ """
445
+ A class used to access the NLTK data server, which can be used to
446
+ download corpora and other data packages.
447
+ """
448
+
449
+ # /////////////////////////////////////////////////////////////////
450
+ # Configuration
451
+ # /////////////////////////////////////////////////////////////////
452
+
453
+ INDEX_TIMEOUT = 60 * 60 # 1 hour
454
+ """The amount of time after which the cached copy of the data
455
+ server index will be considered 'stale,' and will be
456
+ re-downloaded."""
457
+
458
+ DEFAULT_URL = "https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml"
459
+ """The default URL for the NLTK data server's index. An
460
+ alternative URL can be specified when creating a new
461
+ ``Downloader`` object."""
462
+
463
+ # /////////////////////////////////////////////////////////////////
464
+ # Status Constants
465
+ # /////////////////////////////////////////////////////////////////
466
+
467
+ INSTALLED = "installed"
468
+ """A status string indicating that a package or collection is
469
+ installed and up-to-date."""
470
+ NOT_INSTALLED = "not installed"
471
+ """A status string indicating that a package or collection is
472
+ not installed."""
473
+ STALE = "out of date"
474
+ """A status string indicating that a package or collection is
475
+ corrupt or out-of-date."""
476
+ PARTIAL = "partial"
477
+ """A status string indicating that a collection is partially
478
+ installed (i.e., only some of its packages are installed)."""
479
+
480
+ # /////////////////////////////////////////////////////////////////
481
+ # Constructor
482
+ # /////////////////////////////////////////////////////////////////
483
+
484
+ def __init__(self, server_index_url=None, download_dir=None):
485
+ self._url = server_index_url or self.DEFAULT_URL
486
+ """The URL for the data server's index file."""
487
+
488
+ self._collections = {}
489
+ """Dictionary from collection identifier to ``Collection``"""
490
+
491
+ self._packages = {}
492
+ """Dictionary from package identifier to ``Package``"""
493
+
494
+ self._download_dir = download_dir
495
+ """The default directory to which packages will be downloaded."""
496
+
497
+ self._index = None
498
+ """The XML index file downloaded from the data server"""
499
+
500
+ self._index_timestamp = None
501
+ """Time at which ``self._index`` was downloaded. If it is more
502
+ than ``INDEX_TIMEOUT`` seconds old, it will be re-downloaded."""
503
+
504
+ self._status_cache = {}
505
+ """Dictionary from package/collection identifier to status
506
+ string (``INSTALLED``, ``NOT_INSTALLED``, ``STALE``, or
507
+ ``PARTIAL``). Cache is used for packages only, not
508
+ collections."""
509
+
510
+ self._errors = None
511
+ """Flag for telling if all packages got successfully downloaded or not."""
512
+
513
+ # decide where we're going to save things to.
514
+ if self._download_dir is None:
515
+ self._download_dir = self.default_download_dir()
516
+
517
+ # /////////////////////////////////////////////////////////////////
518
+ # Information
519
+ # /////////////////////////////////////////////////////////////////
520
+
521
+ def list(
522
+ self,
523
+ download_dir=None,
524
+ show_packages=True,
525
+ show_collections=True,
526
+ header=True,
527
+ more_prompt=False,
528
+ skip_installed=False,
529
+ ):
530
+ lines = 0 # for more_prompt
531
+ if download_dir is None:
532
+ download_dir = self._download_dir
533
+ print("Using default data directory (%s)" % download_dir)
534
+ if header:
535
+ print("=" * (26 + len(self._url)))
536
+ print(" Data server index for <%s>" % self._url)
537
+ print("=" * (26 + len(self._url)))
538
+ lines += 3 # for more_prompt
539
+ stale = partial = False
540
+
541
+ categories = []
542
+ if show_packages:
543
+ categories.append("packages")
544
+ if show_collections:
545
+ categories.append("collections")
546
+ for category in categories:
547
+ print("%s:" % category.capitalize())
548
+ lines += 1 # for more_prompt
549
+ for info in sorted(getattr(self, category)(), key=str):
550
+ status = self.status(info, download_dir)
551
+ if status == self.INSTALLED and skip_installed:
552
+ continue
553
+ if status == self.STALE:
554
+ stale = True
555
+ if status == self.PARTIAL:
556
+ partial = True
557
+ prefix = {
558
+ self.INSTALLED: "*",
559
+ self.STALE: "-",
560
+ self.PARTIAL: "P",
561
+ self.NOT_INSTALLED: " ",
562
+ }[status]
563
+ name = textwrap.fill(
564
+ "-" * 27 + (info.name or info.id), 75, subsequent_indent=27 * " "
565
+ )[27:]
566
+ print(" [{}] {} {}".format(prefix, info.id.ljust(20, "."), name))
567
+ lines += len(name.split("\n")) # for more_prompt
568
+ if more_prompt and lines > 20:
569
+ user_input = input("Hit Enter to continue: ")
570
+ if user_input.lower() in ("x", "q"):
571
+ return
572
+ lines = 0
573
+ print()
574
+ msg = "([*] marks installed packages"
575
+ if stale:
576
+ msg += "; [-] marks out-of-date or corrupt packages"
577
+ if partial:
578
+ msg += "; [P] marks partially installed collections"
579
+ print(textwrap.fill(msg + ")", subsequent_indent=" ", width=76))
580
+
581
+ def packages(self):
582
+ self._update_index()
583
+ return self._packages.values()
584
+
585
+ def corpora(self):
586
+ self._update_index()
587
+ return [pkg for (id, pkg) in self._packages.items() if pkg.subdir == "corpora"]
588
+
589
+ def models(self):
590
+ self._update_index()
591
+ return [pkg for (id, pkg) in self._packages.items() if pkg.subdir != "corpora"]
592
+
593
+ def collections(self):
594
+ self._update_index()
595
+ return self._collections.values()
596
+
597
+ # /////////////////////////////////////////////////////////////////
598
+ # Downloading
599
+ # /////////////////////////////////////////////////////////////////
600
+
601
+ def _info_or_id(self, info_or_id):
602
+ if isinstance(info_or_id, str):
603
+ return self.info(info_or_id)
604
+ else:
605
+ return info_or_id
606
+
607
+ # [xx] When during downloading is it 'safe' to abort? Only unsafe
608
+ # time is *during* an unzip -- we don't want to leave a
609
+ # partially-unzipped corpus in place because we wouldn't notice
610
+ # it. But if we had the exact total size of the unzipped corpus,
611
+ # then that would be fine. Then we could abort anytime we want!
612
+ # So this is really what we should do. That way the threaded
613
+ # downloader in the gui can just kill the download thread anytime
614
+ # it wants.
615
+
616
+ def incr_download(self, info_or_id, download_dir=None, force=False):
617
+ # If they didn't specify a download_dir, then use the default one.
618
+ if download_dir is None:
619
+ download_dir = self._download_dir
620
+ yield SelectDownloadDirMessage(download_dir)
621
+
622
+ # If they gave us a list of ids, then download each one.
623
+ if isinstance(info_or_id, (list, tuple)):
624
+ yield from self._download_list(info_or_id, download_dir, force)
625
+ return
626
+
627
+ # Look up the requested collection or package.
628
+ try:
629
+ info = self._info_or_id(info_or_id)
630
+ except (OSError, ValueError) as e:
631
+ yield ErrorMessage(None, f"Error loading {info_or_id}: {e}")
632
+ return
633
+
634
+ # Handle collections.
635
+ if isinstance(info, Collection):
636
+ yield StartCollectionMessage(info)
637
+ yield from self.incr_download(info.children, download_dir, force)
638
+ yield FinishCollectionMessage(info)
639
+
640
+ # Handle Packages (delegate to a helper function).
641
+ else:
642
+ yield from self._download_package(info, download_dir, force)
643
+
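+ # Example (illustrative) of consuming the message stream; the package id
+ # is hypothetical and would normally come from the index:
+ #
+ #     d = Downloader()
+ #     for msg in d.incr_download("punkt"):
+ #         if isinstance(msg, ProgressMessage):
+ #             print("%d%%" % msg.progress)
+ #         elif isinstance(msg, ErrorMessage):
+ #             print(msg.message)
+ #             break
+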
644
+ def _num_packages(self, item):
645
+ if isinstance(item, Package):
646
+ return 1
647
+ else:
648
+ return len(item.packages)
649
+
650
+ def _download_list(self, items, download_dir, force):
651
+ # Look up the requested items.
652
+ for i in range(len(items)):
653
+ try:
654
+ items[i] = self._info_or_id(items[i])
655
+ except (OSError, ValueError) as e:
656
+ yield ErrorMessage(items[i], e)
657
+ return
658
+
659
+ # Download each item, re-scaling their progress.
660
+ num_packages = sum(self._num_packages(item) for item in items)
661
+ progress = 0
662
+ for i, item in enumerate(items):
663
+ if isinstance(item, Package):
664
+ delta = 1.0 / num_packages
665
+ else:
666
+ delta = len(item.packages) / num_packages
667
+ for msg in self.incr_download(item, download_dir, force):
668
+ if isinstance(msg, ProgressMessage):
669
+ yield ProgressMessage(progress + msg.progress * delta)
670
+ else:
671
+ yield msg
672
+
673
+ progress += 100 * delta
674
+
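+ # For example (illustrative): with two single packages, each has
+ # delta = 0.5, so the first package's ProgressMessages are rescaled to
+ # 0-50% and the second package's to 50-100%.
+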
675
+ def _download_package(self, info, download_dir, force):
676
+ yield StartPackageMessage(info)
677
+ yield ProgressMessage(0)
678
+
679
+ # Do we already have the current version?
680
+ status = self.status(info, download_dir)
681
+ if not force and status == self.INSTALLED:
682
+ yield UpToDateMessage(info)
683
+ yield ProgressMessage(100)
684
+ yield FinishPackageMessage(info)
685
+ return
686
+
687
+ # Remove the package from our status cache
688
+ self._status_cache.pop(info.id, None)
689
+
690
+ # Check for (and remove) any old/stale version.
691
+ filepath = os.path.join(download_dir, info.filename)
692
+ if os.path.exists(filepath):
693
+ if status == self.STALE:
694
+ yield StaleMessage(info)
695
+ os.remove(filepath)
696
+
697
+ # Ensure the download_dir exists
698
+ if not os.path.exists(download_dir):
699
+ os.makedirs(download_dir)
700
+ if not os.path.exists(os.path.join(download_dir, info.subdir)):
701
+ os.makedirs(os.path.join(download_dir, info.subdir))
702
+
703
+ # Download the file. This will raise an IOError if the url
704
+ # is not found.
705
+ yield StartDownloadMessage(info)
706
+ yield ProgressMessage(5)
707
+ try:
708
+ infile = urlopen(info.url)
709
+ with open(filepath, "wb") as outfile:
710
+ num_blocks = max(1, info.size / (1024 * 16))
711
+ for block in itertools.count():
712
+ s = infile.read(1024 * 16) # 16k blocks.
713
+ outfile.write(s)
714
+ if not s:
715
+ break
716
+ if block % 2 == 0: # how often?
717
+ yield ProgressMessage(min(80, 5 + 75 * (block / num_blocks)))
718
+ infile.close()
719
+ except OSError as e:
720
+ yield ErrorMessage(
721
+ info,
722
+ "Error downloading %r from <%s>:" "\n %s" % (info.id, info.url, e),
723
+ )
724
+ return
725
+ yield FinishDownloadMessage(info)
726
+ yield ProgressMessage(80)
727
+
728
+ # If it's a zipfile, uncompress it.
729
+ if info.filename.endswith(".zip"):
730
+ zipdir = os.path.join(download_dir, info.subdir)
731
+ # Unzip if we're unzipping by default; *or* if it's already
732
+ # been unzipped (presumably a previous version).
733
+ if info.unzip or os.path.exists(os.path.join(zipdir, info.id)):
734
+ yield StartUnzipMessage(info)
735
+ for msg in _unzip_iter(filepath, zipdir, verbose=False):
736
+ # Somewhat of a hack, but we need a proper package reference
737
+ msg.package = info
738
+ yield msg
739
+ yield FinishUnzipMessage(info)
740
+
741
+ yield FinishPackageMessage(info)
742
+
743
+ def download(
744
+ self,
745
+ info_or_id=None,
746
+ download_dir=None,
747
+ quiet=False,
748
+ force=False,
749
+ prefix="[nltk_data] ",
750
+ halt_on_error=True,
751
+ raise_on_error=False,
752
+ print_error_to=sys.stderr,
753
+ ):
754
+
755
+ print_to = functools.partial(print, file=print_error_to)
756
+ # If no info or id is given, then use the interactive shell.
757
+ if info_or_id is None:
758
+ # [xx] hmm -- changing self._download_dir here seems like
759
+ # the wrong thing to do. Maybe the _interactive_download
760
+ # function should make a new copy of self to use?
761
+ if download_dir is not None:
762
+ self._download_dir = download_dir
763
+ self._interactive_download()
764
+ return True
765
+
766
+ else:
767
+ # Define a helper function for displaying output:
768
+ def show(s, prefix2=""):
769
+ print_to(
770
+ textwrap.fill(
771
+ s,
772
+ initial_indent=prefix + prefix2,
773
+ subsequent_indent=prefix + prefix2 + " " * 4,
774
+ )
775
+ )
776
+
777
+ for msg in self.incr_download(info_or_id, download_dir, force):
778
+ # Error messages
779
+ if isinstance(msg, ErrorMessage):
780
+ show(msg.message)
781
+ if raise_on_error:
782
+ raise ValueError(msg.message)
783
+ if halt_on_error:
784
+ return False
785
+ self._errors = True
786
+ if not quiet:
787
+ print_to("Error installing package. Retry? [n/y/e]")
788
+ choice = input().strip()
789
+ if choice in ["y", "Y"]:
790
+ if not self.download(
791
+ msg.package.id,
792
+ download_dir,
793
+ quiet,
794
+ force,
795
+ prefix,
796
+ halt_on_error,
797
+ raise_on_error,
798
+ ):
799
+ return False
800
+ elif choice in ["e", "E"]:
801
+ return False
802
+
803
+ # All other messages
804
+ if not quiet:
805
+ # Collection downloading messages:
806
+ if isinstance(msg, StartCollectionMessage):
807
+ show("Downloading collection %r" % msg.collection.id)
808
+ prefix += " | "
809
+ print_to(prefix)
810
+ elif isinstance(msg, FinishCollectionMessage):
811
+ print_to(prefix)
812
+ prefix = prefix[:-4]
813
+ if self._errors:
814
+ show(
815
+ "Downloaded collection %r with errors"
816
+ % msg.collection.id
817
+ )
818
+ else:
819
+ show("Done downloading collection %s" % msg.collection.id)
820
+
821
+ # Package downloading messages:
822
+ elif isinstance(msg, StartPackageMessage):
823
+ show(
824
+ "Downloading package %s to %s..."
825
+ % (msg.package.id, download_dir)
826
+ )
827
+ elif isinstance(msg, UpToDateMessage):
828
+ show("Package %s is already up-to-date!" % msg.package.id, " ")
829
+ # elif isinstance(msg, StaleMessage):
830
+ # show('Package %s is out-of-date or corrupt' %
831
+ # msg.package.id, ' ')
832
+ elif isinstance(msg, StartUnzipMessage):
833
+ show("Unzipping %s." % msg.package.filename, " ")
834
+
835
+ # Data directory message:
836
+ elif isinstance(msg, SelectDownloadDirMessage):
837
+ download_dir = msg.download_dir
838
+ return True
839
+
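+ # Example (illustrative) of the blocking API; the identifiers below are
+ # shown only for demonstration:
+ #
+ #     d = Downloader()
+ #     d.download("punkt")                     # one package
+ #     d.download(["punkt", "stopwords"])      # several at once
+ #     d.download()                            # interactive shell / GUI
+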
840
+ def is_stale(self, info_or_id, download_dir=None):
841
+ return self.status(info_or_id, download_dir) == self.STALE
842
+
843
+ def is_installed(self, info_or_id, download_dir=None):
844
+ return self.status(info_or_id, download_dir) == self.INSTALLED
845
+
846
+ def clear_status_cache(self, id=None):
847
+ if id is None:
848
+ self._status_cache.clear()
849
+ else:
850
+ self._status_cache.pop(id, None)
851
+
852
+ def status(self, info_or_id, download_dir=None):
853
+ """
854
+ Return a constant describing the status of the given package
855
+ or collection. Status can be one of ``INSTALLED``,
856
+ ``NOT_INSTALLED``, ``STALE``, or ``PARTIAL``.
857
+ """
858
+ if download_dir is None:
859
+ download_dir = self._download_dir
860
+ info = self._info_or_id(info_or_id)
861
+
862
+ # Handle collections:
863
+ if isinstance(info, Collection):
864
+ pkg_status = [self.status(pkg.id) for pkg in info.packages]
865
+ if self.STALE in pkg_status:
866
+ return self.STALE
867
+ elif self.PARTIAL in pkg_status:
868
+ return self.PARTIAL
869
+ elif self.INSTALLED in pkg_status and self.NOT_INSTALLED in pkg_status:
870
+ return self.PARTIAL
871
+ elif self.NOT_INSTALLED in pkg_status:
872
+ return self.NOT_INSTALLED
873
+ else:
874
+ return self.INSTALLED
875
+
876
+ # Handle packages:
877
+ else:
878
+ filepath = os.path.join(download_dir, info.filename)
879
+ if download_dir != self._download_dir:
880
+ return self._pkg_status(info, filepath)
881
+ else:
882
+ if info.id not in self._status_cache:
883
+ self._status_cache[info.id] = self._pkg_status(info, filepath)
884
+ return self._status_cache[info.id]
885
+
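+ # For example (illustrative): a collection with one INSTALLED and one
+ # NOT_INSTALLED member reports PARTIAL, while a single STALE member makes
+ # the whole collection STALE.
+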
886
+ def _pkg_status(self, info, filepath):
887
+ if not os.path.exists(filepath):
888
+ return self.NOT_INSTALLED
889
+
890
+ # Check if the file has the correct size.
891
+ try:
892
+ filestat = os.stat(filepath)
893
+ except OSError:
894
+ return self.NOT_INSTALLED
895
+ if filestat.st_size != int(info.size):
896
+ return self.STALE
897
+
898
+ # Check if the file's checksum matches
899
+ if md5_hexdigest(filepath) != info.checksum:
900
+ return self.STALE
901
+
902
+ # If it's a zipfile, and it's been at least partially
903
+ # unzipped, then check if it's been fully unzipped.
904
+ if filepath.endswith(".zip"):
905
+ unzipdir = filepath[:-4]
906
+ if not os.path.exists(unzipdir):
907
+ return self.INSTALLED # but not unzipped -- ok!
908
+ if not os.path.isdir(unzipdir):
909
+ return self.STALE
910
+
911
+ unzipped_size = sum(
912
+ os.stat(os.path.join(d, f)).st_size
913
+ for d, _, files in os.walk(unzipdir)
914
+ for f in files
915
+ )
916
+ if unzipped_size != info.unzipped_size:
917
+ return self.STALE
918
+
919
+ # Otherwise, everything looks good.
920
+ return self.INSTALLED
921
+
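+ # Note (illustrative): a zip that downloaded intact but was only partially
+ # extracted is reported as STALE here, because the on-disk sizes will not
+ # sum to info.unzipped_size.
+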
922
+ def update(self, quiet=False, prefix="[nltk_data] "):
923
+ """
924
+ Re-download any packages whose status is STALE.
925
+ """
926
+ self.clear_status_cache()
927
+ for pkg in self.packages():
928
+ if self.status(pkg) == self.STALE:
929
+ self.download(pkg, quiet=quiet, prefix=prefix)
930
+
931
+ # /////////////////////////////////////////////////////////////////
932
+ # Index
933
+ # /////////////////////////////////////////////////////////////////
934
+
935
+ def _update_index(self, url=None):
936
+ """A helper function that ensures that self._index is
937
+ up-to-date. If the index is older than self.INDEX_TIMEOUT,
938
+ then download it again."""
939
+ # Check if the index is already up-to-date. If so, do nothing.
940
+ if not (
941
+ self._index is None
942
+ or url is not None
943
+ or time.time() - self._index_timestamp > self.INDEX_TIMEOUT
944
+ ):
945
+ return
946
+
947
+ # If a URL was specified, then update our URL.
948
+ self._url = url or self._url
949
+
950
+ # Download the index file.
951
+ self._index = nltk.internals.ElementWrapper(
952
+ ElementTree.parse(urlopen(self._url)).getroot()
953
+ )
954
+ self._index_timestamp = time.time()
955
+
956
+ # Build a dictionary of packages.
957
+ packages = [Package.fromxml(p) for p in self._index.findall("packages/package")]
958
+ self._packages = {p.id: p for p in packages}
959
+
960
+ # Build a dictionary of collections.
961
+ collections = [
962
+ Collection.fromxml(c) for c in self._index.findall("collections/collection")
963
+ ]
964
+ self._collections = {c.id: c for c in collections}
965
+
966
+ # Replace identifiers with actual children in collection.children.
967
+ for collection in self._collections.values():
968
+ for i, child_id in enumerate(collection.children):
969
+ if child_id in self._packages:
970
+ collection.children[i] = self._packages[child_id]
971
+ elif child_id in self._collections:
972
+ collection.children[i] = self._collections[child_id]
973
+ else:
974
+ print(
975
+ "removing collection member with no package: {}".format(
976
+ child_id
977
+ )
978
+ )
979
+ del collection.children[i]
980
+
981
+ # Fill in collection.packages for each collection.
982
+ for collection in self._collections.values():
983
+ packages = {}
984
+ queue = [collection]
985
+ for child in queue:
986
+ if isinstance(child, Collection):
987
+ queue.extend(child.children)
988
+ elif isinstance(child, Package):
989
+ packages[child.id] = child
990
+ else:
991
+ pass
992
+ collection.packages = packages.values()
993
+
994
+ # Flush the status cache
995
+ self._status_cache.clear()
996
+
997
+ def index(self):
998
+ """
999
+ Return the XML index describing the packages available from
1000
+ the data server. If necessary, this index will be downloaded
1001
+ from the data server.
1002
+ """
1003
+ self._update_index()
1004
+ return self._index
1005
+
1006
+ def info(self, id):
1007
+ """Return the ``Package`` or ``Collection`` record for the
1008
+ given item."""
1009
+ self._update_index()
1010
+ if id in self._packages:
1011
+ return self._packages[id]
1012
+ if id in self._collections:
1013
+ return self._collections[id]
1014
+ raise ValueError("Package %r not found in index" % id)
1015
+
1016
+ def xmlinfo(self, id):
1017
+ """Return the XML info record for the given item"""
1018
+ self._update_index()
1019
+ for package in self._index.findall("packages/package"):
1020
+ if package.get("id") == id:
1021
+ return package
1022
+ for collection in self._index.findall("collections/collection"):
1023
+ if collection.get("id") == id:
1024
+ return collection
1025
+ raise ValueError("Package %r not found in index" % id)
1026
+
1027
+ # /////////////////////////////////////////////////////////////////
1028
+ # URL & Data Directory
1029
+ # /////////////////////////////////////////////////////////////////
1030
+
1031
+ def _get_url(self):
1032
+ """The URL for the data server's index file."""
1033
+ return self._url
1034
+
1035
+ def _set_url(self, url):
1036
+ """
1037
+ Set a new URL for the data server. If we're unable to contact
1038
+ the given url, then the original url is kept.
1039
+ """
1040
+ original_url = self._url
1041
+ try:
1042
+ self._update_index(url)
1043
+ except:
1044
+ self._url = original_url
1045
+ raise
1046
+
1047
+ url = property(_get_url, _set_url)
1048
+
1049
+ def default_download_dir(self):
1050
+ """
1051
+ Return the directory to which packages will be downloaded by
1052
+ default. This value can be overridden using the constructor,
1053
+ or on a case-by-case basis using the ``download_dir`` argument when
1054
+ calling ``download()``.
1055
+
1056
+ On Windows, the default download directory is
1057
+ ``PYTHONHOME/lib/nltk``, where *PYTHONHOME* is the
1058
+ directory containing Python, e.g. ``C:\\Python25``.
1059
+
1060
+ On all other platforms, the default directory is the first of
1061
+ the following which exists or which can be created with write
1062
+ permission: ``/usr/share/nltk_data``, ``/usr/local/share/nltk_data``,
1063
+ ``/usr/lib/nltk_data``, ``/usr/local/lib/nltk_data``, ``~/nltk_data``.
1064
+ """
1065
+ # Check if we are on GAE where we cannot write into filesystem.
1066
+ if "APPENGINE_RUNTIME" in os.environ:
1067
+ return
1068
+
1069
+ # Check if we have sufficient permissions to install in a
1070
+ # variety of system-wide locations.
1071
+ for nltkdir in nltk.data.path:
1072
+ if os.path.exists(nltkdir) and nltk.internals.is_writable(nltkdir):
1073
+ return nltkdir
1074
+
1075
+ # On Windows, use %APPDATA%
1076
+ if sys.platform == "win32" and "APPDATA" in os.environ:
1077
+ homedir = os.environ["APPDATA"]
1078
+
1079
+ # Otherwise, install in the user's home directory.
1080
+ else:
1081
+ homedir = os.path.expanduser("~/")
1082
+ if homedir == "~/":
1083
+ raise ValueError("Could not find a default download directory")
1084
+
1085
+ # append "nltk_data" to the home directory
1086
+ return os.path.join(homedir, "nltk_data")
1087
+
1088
+ def _get_download_dir(self):
1089
+ """
1090
+ The default directory to which packages will be downloaded.
1091
+ This defaults to the value returned by ``default_download_dir()``.
1092
+ To override this default on a case-by-case basis, use the
1093
+ ``download_dir`` argument when calling ``download()``.
1094
+ """
1095
+ return self._download_dir
1096
+
1097
+ def _set_download_dir(self, download_dir):
1098
+ self._download_dir = download_dir
1099
+ # Clear the status cache.
1100
+ self._status_cache.clear()
1101
+
1102
+ download_dir = property(_get_download_dir, _set_download_dir)
1103
+
1104
+ # /////////////////////////////////////////////////////////////////
1105
+ # Interactive Shell
1106
+ # /////////////////////////////////////////////////////////////////
1107
+
1108
+ def _interactive_download(self):
1109
+ # Try the GUI first; if that doesn't work, try the simple
1110
+ # interactive shell.
1111
+ if TKINTER:
1112
+ try:
1113
+ DownloaderGUI(self).mainloop()
1114
+ except TclError:
1115
+ DownloaderShell(self).run()
1116
+ else:
1117
+ DownloaderShell(self).run()
1118
+
1119
+
1120
+ class DownloaderShell:
1121
+ def __init__(self, dataserver):
1122
+ self._ds = dataserver
1123
+
1124
+ def _simple_interactive_menu(self, *options):
1125
+ print("-" * 75)
1126
+ spc = (68 - sum(len(o) for o in options)) // (len(options) - 1) * " "
1127
+ print(" " + spc.join(options))
1128
+ print("-" * 75)
1129
+
1130
+ def run(self):
1131
+ print("NLTK Downloader")
1132
+ while True:
1133
+ self._simple_interactive_menu(
1134
+ "d) Download",
1135
+ "l) List",
1136
+ " u) Update",
1137
+ "c) Config",
1138
+ "h) Help",
1139
+ "q) Quit",
1140
+ )
1141
+ user_input = input("Downloader> ").strip()
1142
+ if not user_input:
1143
+ print()
1144
+ continue
1145
+ command = user_input.lower().split()[0]
1146
+ args = user_input.split()[1:]
1147
+ try:
1148
+ if command == "l":
1149
+ print()
1150
+ self._ds.list(self._ds.download_dir, header=False, more_prompt=True)
1151
+ elif command == "h":
1152
+ self._simple_interactive_help()
1153
+ elif command == "c":
1154
+ self._simple_interactive_config()
1155
+ elif command in ("q", "x"):
1156
+ return
1157
+ elif command == "d":
1158
+ self._simple_interactive_download(args)
1159
+ elif command == "u":
1160
+ self._simple_interactive_update()
1161
+ else:
1162
+ print("Command %r unrecognized" % user_input)
1163
+ except HTTPError as e:
1164
+ print("Error reading from server: %s" % e)
1165
+ except URLError as e:
1166
+ print("Error connecting to server: %s" % e.reason)
1167
+ # try checking if user_input is a package name, &
1168
+ # downloading it?
1169
+ print()
1170
+
1171
+ def _simple_interactive_download(self, args):
1172
+ if args:
1173
+ for arg in args:
1174
+ try:
1175
+ self._ds.download(arg, prefix=" ")
1176
+ except (OSError, ValueError) as e:
1177
+ print(e)
1178
+ else:
1179
+ while True:
1180
+ print()
1181
+ print("Download which package (l=list; x=cancel)?")
1182
+ user_input = input(" Identifier> ")
1183
+ if user_input.lower() == "l":
1184
+ self._ds.list(
1185
+ self._ds.download_dir,
1186
+ header=False,
1187
+ more_prompt=True,
1188
+ skip_installed=True,
1189
+ )
1190
+ continue
1191
+ elif user_input.lower() in ("x", "q", ""):
1192
+ return
1193
+ elif user_input:
1194
+ for id in user_input.split():
1195
+ try:
1196
+ self._ds.download(id, prefix=" ")
1197
+ except (OSError, ValueError) as e:
1198
+ print(e)
1199
+ break
1200
+
1201
+ def _simple_interactive_update(self):
1202
+ while True:
1203
+ stale_packages = []
1204
+ stale = partial = False
1205
+ for info in sorted(getattr(self._ds, "packages")(), key=str):
1206
+ if self._ds.status(info) == self._ds.STALE:
1207
+ stale_packages.append((info.id, info.name))
1208
+
1209
+ print()
1210
+ if stale_packages:
1211
+ print("Will update following packages (o=ok; x=cancel)")
1212
+ for pid, pname in stale_packages:
1213
+ name = textwrap.fill(
1214
+ "-" * 27 + (pname), 75, subsequent_indent=27 * " "
1215
+ )[27:]
1216
+ print(" [ ] {} {}".format(pid.ljust(20, "."), name))
1217
+ print()
1218
+
1219
+ user_input = input(" Identifier> ")
1220
+ if user_input.lower() == "o":
1221
+ for pid, pname in stale_packages:
1222
+ try:
1223
+ self._ds.download(pid, prefix=" ")
1224
+ except (OSError, ValueError) as e:
1225
+ print(e)
1226
+ break
1227
+ elif user_input.lower() in ("x", "q", ""):
1228
+ return
1229
+ else:
1230
+ print("Nothing to update.")
1231
+ return
1232
+
1233
+ def _simple_interactive_help(self):
1234
+ print()
1235
+ print("Commands:")
1236
+ print(
1237
+ " d) Download a package or collection u) Update out of date packages"
1238
+ )
1239
+ print(" l) List packages & collections h) Help")
1240
+ print(" c) View & Modify Configuration q) Quit")
1241
+
1242
+ def _show_config(self):
1243
+ print()
1244
+ print("Data Server:")
1245
+ print(" - URL: <%s>" % self._ds.url)
1246
+ print(" - %d Package Collections Available" % len(self._ds.collections()))
1247
+ print(" - %d Individual Packages Available" % len(self._ds.packages()))
1248
+ print()
1249
+ print("Local Machine:")
1250
+ print(" - Data directory: %s" % self._ds.download_dir)
1251
+
1252
+ def _simple_interactive_config(self):
1253
+ self._show_config()
1254
+ while True:
1255
+ print()
1256
+ self._simple_interactive_menu(
1257
+ "s) Show Config", "u) Set Server URL", "d) Set Data Dir", "m) Main Menu"
1258
+ )
1259
+ user_input = input("Config> ").strip().lower()
1260
+ if user_input == "s":
1261
+ self._show_config()
1262
+ elif user_input == "d":
1263
+ new_dl_dir = input(" New Directory> ").strip()
1264
+ if new_dl_dir in ("", "x", "q", "X", "Q"):
1265
+ print(" Cancelled!")
1266
+ elif os.path.isdir(new_dl_dir):
1267
+ self._ds.download_dir = new_dl_dir
1268
+ else:
1269
+ print("Directory %r not found! Create it first." % new_dl_dir)
1270
+ elif user_input == "u":
1271
+ new_url = input(" New URL> ").strip()
1272
+ if new_url in ("", "x", "q", "X", "Q"):
1273
+ print(" Cancelled!")
1274
+ else:
1275
+ if not new_url.startswith(("http://", "https://")):
1276
+ new_url = "http://" + new_url
1277
+ try:
1278
+ self._ds.url = new_url
1279
+ except Exception as e:
1280
+ print(f"Error reading <{new_url!r}>:\n {e}")
1281
+ elif user_input == "m":
1282
+ break
1283
+
1284
+
1285
+ class DownloaderGUI:
1286
+ """
1287
+ Graphical interface for downloading packages from the NLTK data
1288
+ server.
1289
+ """
1290
+
1291
+ # /////////////////////////////////////////////////////////////////
1292
+ # Column Configuration
1293
+ # /////////////////////////////////////////////////////////////////
1294
+
1295
+ COLUMNS = [
1296
+ "",
1297
+ "Identifier",
1298
+ "Name",
1299
+ "Size",
1300
+ "Status",
1301
+ "Unzipped Size",
1302
+ "Copyright",
1303
+ "Contact",
1304
+ "License",
1305
+ "Author",
1306
+ "Subdir",
1307
+ "Checksum",
1308
+ ]
1309
+ """A list of the names of columns. This controls the order in
1310
+ which the columns will appear. If this is edited, then
1311
+ ``_package_to_columns()`` may need to be edited to match."""
1312
+
1313
+ COLUMN_WEIGHTS = {"": 0, "Name": 5, "Size": 0, "Status": 0}
1314
+ """A dictionary specifying how columns should be resized when the
1315
+ table is resized. Columns with weight 0 will not be resized at
1316
+ all; and columns with high weight will be resized more.
1317
+ Default weight (for columns not explicitly listed) is 1."""
1318
+
1319
+ COLUMN_WIDTHS = {
1320
+ "": 1,
1321
+ "Identifier": 20,
1322
+ "Name": 45,
1323
+ "Size": 10,
1324
+ "Unzipped Size": 10,
1325
+ "Status": 12,
1326
+ }
1327
+ """A dictionary specifying how wide each column should be, in
1328
+ characters. The default width (for columns not explicitly
1329
+ listed) is specified by ``DEFAULT_COLUMN_WIDTH``."""
1330
+
1331
+ DEFAULT_COLUMN_WIDTH = 30
1332
+ """The default width for columns that are not explicitly listed
1333
+ in ``COLUMN_WIDTHS``."""
1334
+
1335
+ INITIAL_COLUMNS = ["", "Identifier", "Name", "Size", "Status"]
1336
+ """The set of columns that should be displayed by default."""
1337
+
1338
+ # Perform a few import-time sanity checks to make sure that the
1339
+ # column configuration variables are defined consistently:
1340
+ for c in COLUMN_WEIGHTS:
1341
+ assert c in COLUMNS
1342
+ for c in COLUMN_WIDTHS:
1343
+ assert c in COLUMNS
1344
+ for c in INITIAL_COLUMNS:
1345
+ assert c in COLUMNS
1346
+
1347
+ # /////////////////////////////////////////////////////////////////
1348
+ # Color Configuration
1349
+ # /////////////////////////////////////////////////////////////////
1350
+
1351
+ _BACKDROP_COLOR = ("#000", "#ccc")
1352
+
1353
+ _ROW_COLOR = {
1354
+ Downloader.INSTALLED: ("#afa", "#080"),
1355
+ Downloader.PARTIAL: ("#ffa", "#880"),
1356
+ Downloader.STALE: ("#faa", "#800"),
1357
+ Downloader.NOT_INSTALLED: ("#fff", "#888"),
1358
+ }
1359
+
1360
+ _MARK_COLOR = ("#000", "#ccc")
1361
+
1362
+ # _FRONT_TAB_COLOR = ('#ccf', '#008')
1363
+ # _BACK_TAB_COLOR = ('#88a', '#448')
1364
+ _FRONT_TAB_COLOR = ("#fff", "#45c")
1365
+ _BACK_TAB_COLOR = ("#aaa", "#67a")
1366
+
1367
+ _PROGRESS_COLOR = ("#f00", "#aaa")
1368
+
1369
+ _TAB_FONT = "helvetica -16 bold"
1370
+
1371
+ # /////////////////////////////////////////////////////////////////
1372
+ # Constructor
1373
+ # /////////////////////////////////////////////////////////////////
1374
+
1375
+ def __init__(self, dataserver, use_threads=True):
1376
+ self._ds = dataserver
1377
+ self._use_threads = use_threads
1378
+
1379
+ # For the threaded downloader:
1380
+ self._download_lock = threading.Lock()
1381
+ self._download_msg_queue = []
1382
+ self._download_abort_queue = []
1383
+ self._downloading = False
1384
+
1385
+ # For tkinter after callbacks:
1386
+ self._afterid = {}
1387
+
1388
+ # A message log.
1389
+ self._log_messages = []
1390
+ self._log_indent = 0
1391
+ self._log("NLTK Downloader Started!")
1392
+
1393
+ # Create the main window.
1394
+ top = self.top = Tk()
1395
+ top.geometry("+50+50")
1396
+ top.title("NLTK Downloader")
1397
+ top.configure(background=self._BACKDROP_COLOR[1])
1398
+
1399
+ # Set up some bindings now, in case anything goes wrong.
1400
+ top.bind("<Control-q>", self.destroy)
1401
+ top.bind("<Control-x>", self.destroy)
1402
+ self._destroyed = False
1403
+
1404
+ self._column_vars = {}
1405
+
1406
+ # Initialize the GUI.
1407
+ self._init_widgets()
1408
+ self._init_menu()
1409
+ try:
1410
+ self._fill_table()
1411
+ except HTTPError as e:
1412
+ showerror("Error reading from server", e)
1413
+ except URLError as e:
1414
+ showerror("Error connecting to server", e.reason)
1415
+
1416
+ self._show_info()
1417
+ self._select_columns()
1418
+ self._table.select(0)
1419
+
1420
+ # Make sure we get notified when we're destroyed, so we can
1421
+ # cancel any download in progress.
1422
+ self._table.bind("<Destroy>", self._destroy)
1423
+
1424
+ def _log(self, msg):
1425
+ self._log_messages.append(
1426
+ "{} {}{}".format(time.ctime(), " | " * self._log_indent, msg)
1427
+ )
1428
+
1429
+ # /////////////////////////////////////////////////////////////////
1430
+ # Internals
1431
+ # /////////////////////////////////////////////////////////////////
1432
+
1433
+ def _init_widgets(self):
1434
+ # Create the top-level frame structures
1435
+ f1 = Frame(self.top, relief="raised", border=2, padx=8, pady=0)
1436
+ f1.pack(side="top", expand=True, fill="both")
1437
+ f1.grid_rowconfigure(2, weight=1)
1438
+ f1.grid_columnconfigure(0, weight=1)
1439
+ Frame(f1, height=8).grid(column=0, row=0) # spacer
1440
+ tabframe = Frame(f1)
1441
+ tabframe.grid(column=0, row=1, sticky="news")
1442
+ tableframe = Frame(f1)
1443
+ tableframe.grid(column=0, row=2, sticky="news")
1444
+ buttonframe = Frame(f1)
1445
+ buttonframe.grid(column=0, row=3, sticky="news")
1446
+ Frame(f1, height=8).grid(column=0, row=4) # spacer
1447
+ infoframe = Frame(f1)
1448
+ infoframe.grid(column=0, row=5, sticky="news")
1449
+ Frame(f1, height=8).grid(column=0, row=6) # spacer
1450
+ progressframe = Frame(
1451
+ self.top, padx=3, pady=3, background=self._BACKDROP_COLOR[1]
1452
+ )
1453
+ progressframe.pack(side="bottom", fill="x")
1454
+ self.top["border"] = 0
1455
+ self.top["highlightthickness"] = 0
1456
+
1457
+ # Create the tabs
1458
+ self._tab_names = ["Collections", "Corpora", "Models", "All Packages"]
1459
+ self._tabs = {}
1460
+ for i, tab in enumerate(self._tab_names):
1461
+ label = Label(tabframe, text=tab, font=self._TAB_FONT)
1462
+ label.pack(side="left", padx=((i + 1) % 2) * 10)
1463
+ label.bind("<Button-1>", self._select_tab)
1464
+ self._tabs[tab.lower()] = label
1465
+
1466
+ # Create the table.
1467
+ column_weights = [self.COLUMN_WEIGHTS.get(column, 1) for column in self.COLUMNS]
1468
+ self._table = Table(
1469
+ tableframe,
1470
+ self.COLUMNS,
1471
+ column_weights=column_weights,
1472
+ highlightthickness=0,
1473
+ listbox_height=16,
1474
+ reprfunc=self._table_reprfunc,
1475
+ )
1476
+ self._table.columnconfig(0, foreground=self._MARK_COLOR[0]) # marked
1477
+ for i, column in enumerate(self.COLUMNS):
1478
+ width = self.COLUMN_WIDTHS.get(column, self.DEFAULT_COLUMN_WIDTH)
1479
+ self._table.columnconfig(i, width=width)
1480
+ self._table.pack(expand=True, fill="both")
1481
+ self._table.focus()
1482
+ self._table.bind_to_listboxes("<Double-Button-1>", self._download)
1483
+ self._table.bind("<space>", self._table_mark)
1484
+ self._table.bind("<Return>", self._download)
1485
+ self._table.bind("<Left>", self._prev_tab)
1486
+ self._table.bind("<Right>", self._next_tab)
1487
+ self._table.bind("<Control-a>", self._mark_all)
1488
+
1489
+ # Create entry boxes for URL & download_dir
1490
+ infoframe.grid_columnconfigure(1, weight=1)
1491
+
1492
+ info = [
1493
+ ("url", "Server Index:", self._set_url),
1494
+ ("download_dir", "Download Directory:", self._set_download_dir),
1495
+ ]
1496
+ self._info = {}
1497
+ for (i, (key, label, callback)) in enumerate(info):
1498
+ Label(infoframe, text=label).grid(column=0, row=i, sticky="e")
1499
+ entry = Entry(
1500
+ infoframe,
1501
+ font="courier",
1502
+ relief="groove",
1503
+ disabledforeground="#007aff",
1504
+ foreground="#007aff",
1505
+ )
1506
+ self._info[key] = (entry, callback)
1507
+ entry.bind("<Return>", self._info_save)
1508
+ entry.bind("<Button-1>", lambda e, key=key: self._info_edit(key))
1509
+ entry.grid(column=1, row=i, sticky="ew")
1510
+
1511
+ # If the user edits url or download_dir, and then clicks outside
1512
+ # the entry box, then save their results.
1513
+ self.top.bind("<Button-1>", self._info_save)
1514
+
1515
+ # Create Download & Refresh buttons.
1516
+ self._download_button = Button(
1517
+ buttonframe, text="Download", command=self._download, width=8
1518
+ )
1519
+ self._download_button.pack(side="left")
1520
+ self._refresh_button = Button(
1521
+ buttonframe, text="Refresh", command=self._refresh, width=8
1522
+ )
1523
+ self._refresh_button.pack(side="right")
1524
+
1525
+ # Create Progress bar
1526
+ self._progresslabel = Label(
1527
+ progressframe,
1528
+ text="",
1529
+ foreground=self._BACKDROP_COLOR[0],
1530
+ background=self._BACKDROP_COLOR[1],
1531
+ )
1532
+ self._progressbar = Canvas(
1533
+ progressframe,
1534
+ width=200,
1535
+ height=16,
1536
+ background=self._PROGRESS_COLOR[1],
1537
+ relief="sunken",
1538
+ border=1,
1539
+ )
1540
+ self._init_progressbar()
1541
+ self._progressbar.pack(side="right")
1542
+ self._progresslabel.pack(side="left")
1543
+
1544
+ def _init_menu(self):
1545
+ menubar = Menu(self.top)
1546
+
1547
+ filemenu = Menu(menubar, tearoff=0)
1548
+ filemenu.add_command(
1549
+ label="Download", underline=0, command=self._download, accelerator="Return"
1550
+ )
1551
+ filemenu.add_separator()
1552
+ filemenu.add_command(
1553
+ label="Change Server Index",
1554
+ underline=7,
1555
+ command=lambda: self._info_edit("url"),
1556
+ )
1557
+ filemenu.add_command(
1558
+ label="Change Download Directory",
1559
+ underline=0,
1560
+ command=lambda: self._info_edit("download_dir"),
1561
+ )
1562
+ filemenu.add_separator()
1563
+ filemenu.add_command(label="Show Log", underline=5, command=self._show_log)
1564
+ filemenu.add_separator()
1565
+ filemenu.add_command(
1566
+ label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x"
1567
+ )
1568
+ menubar.add_cascade(label="File", underline=0, menu=filemenu)
1569
+
1570
+ # Create a menu to control which columns of the table are
1571
+ # shown. n.b.: we never hide the first two columns (mark and
1572
+ # identifier).
1573
+ viewmenu = Menu(menubar, tearoff=0)
1574
+ for column in self._table.column_names[2:]:
1575
+ var = IntVar(self.top)
1576
+ assert column not in self._column_vars
1577
+ self._column_vars[column] = var
1578
+ if column in self.INITIAL_COLUMNS:
1579
+ var.set(1)
1580
+ viewmenu.add_checkbutton(
1581
+ label=column, underline=0, variable=var, command=self._select_columns
1582
+ )
1583
+ menubar.add_cascade(label="View", underline=0, menu=viewmenu)
1584
+
1585
+ # Create a sort menu
1586
+ # [xx] this should be selectbuttons; and it should include
1587
+ # reversed sorts as options.
1588
+ sortmenu = Menu(menubar, tearoff=0)
1589
+ for column in self._table.column_names[1:]:
1590
+ sortmenu.add_command(
1591
+ label="Sort by %s" % column,
1592
+ command=(lambda c=column: self._table.sort_by(c, "ascending")),
1593
+ )
1594
+ sortmenu.add_separator()
1595
+ # sortmenu.add_command(label='Descending Sort:')
1596
+ for column in self._table.column_names[1:]:
1597
+ sortmenu.add_command(
1598
+ label="Reverse sort by %s" % column,
1599
+ command=(lambda c=column: self._table.sort_by(c, "descending")),
1600
+ )
1601
+ menubar.add_cascade(label="Sort", underline=0, menu=sortmenu)
1602
+
1603
+ helpmenu = Menu(menubar, tearoff=0)
1604
+ helpmenu.add_command(label="About", underline=0, command=self.about)
1605
+ helpmenu.add_command(
1606
+ label="Instructions", underline=0, command=self.help, accelerator="F1"
1607
+ )
1608
+ menubar.add_cascade(label="Help", underline=0, menu=helpmenu)
1609
+ self.top.bind("<F1>", self.help)
1610
+
1611
+ self.top.config(menu=menubar)
1612
+
1613
+ def _select_columns(self):
1614
+ for (column, var) in self._column_vars.items():
1615
+ if var.get():
1616
+ self._table.show_column(column)
1617
+ else:
1618
+ self._table.hide_column(column)
1619
+
1620
+ def _refresh(self):
1621
+ self._ds.clear_status_cache()
1622
+ try:
1623
+ self._fill_table()
1624
+ except HTTPError as e:
1625
+ showerror("Error reading from server", e)
1626
+ except URLError as e:
1627
+ showerror("Error connecting to server", e.reason)
1628
+ self._table.select(0)
1629
+
1630
+ def _info_edit(self, info_key):
1631
+ self._info_save() # just in case.
1632
+ (entry, callback) = self._info[info_key]
1633
+ entry["state"] = "normal"
1634
+ entry["relief"] = "sunken"
1635
+ entry.focus()
1636
+
1637
+ def _info_save(self, e=None):
1638
+ focus = self._table
1639
+ for entry, callback in self._info.values():
1640
+ if entry["state"] == "disabled":
1641
+ continue
1642
+ if e is not None and e.widget is entry and e.keysym != "Return":
1643
+ focus = entry
1644
+ else:
1645
+ entry["state"] = "disabled"
1646
+ entry["relief"] = "groove"
1647
+ callback(entry.get())
1648
+ focus.focus()
1649
+
1650
+ def _table_reprfunc(self, row, col, val):
1651
+ if self._table.column_names[col].endswith("Size"):
1652
+ if isinstance(val, str):
1653
+ return " %s" % val
1654
+ elif val < 1024**2:
1655
+ return " %.1f KB" % (val / 1024.0**1)
1656
+ elif val < 1024**3:
1657
+ return " %.1f MB" % (val / 1024.0**2)
1658
+ else:
1659
+ return " %.1f GB" % (val / 1024.0**3)
1660
+
1661
+ if col in (0, ""):
1662
+ return str(val)
1663
+ else:
1664
+ return " %s" % val
1665
+
1666
+ def _set_url(self, url):
1667
+ if url == self._ds.url:
1668
+ return
1669
+ try:
1670
+ self._ds.url = url
1671
+ self._fill_table()
1672
+ except OSError as e:
1673
+ showerror("Error Setting Server Index", str(e))
1674
+ self._show_info()
1675
+
1676
+ def _set_download_dir(self, download_dir):
1677
+ if self._ds.download_dir == download_dir:
1678
+ return
1679
+ # check if the dir exists, and if not, ask if we should create it?
1680
+
1681
+ # Clear our status cache, & re-check what's installed
1682
+ self._ds.download_dir = download_dir
1683
+ try:
1684
+ self._fill_table()
1685
+ except HTTPError as e:
1686
+ showerror("Error reading from server", e)
1687
+ except URLError as e:
1688
+ showerror("Error connecting to server", e.reason)
1689
+ self._show_info()
1690
+
1691
+ def _show_info(self):
1692
+ print("showing info", self._ds.url)
1693
+ for entry, cb in self._info.values():
1694
+ entry["state"] = "normal"
1695
+ entry.delete(0, "end")
1696
+ self._info["url"][0].insert(0, self._ds.url)
1697
+ self._info["download_dir"][0].insert(0, self._ds.download_dir)
1698
+ for entry, cb in self._info.values():
1699
+ entry["state"] = "disabled"
1700
+
1701
+ def _prev_tab(self, *e):
1702
+ for i, tab in enumerate(self._tab_names):
1703
+ if tab.lower() == self._tab and i > 0:
1704
+ self._tab = self._tab_names[i - 1].lower()
1705
+ try:
1706
+ return self._fill_table()
1707
+ except HTTPError as e:
1708
+ showerror("Error reading from server", e)
1709
+ except URLError as e:
1710
+ showerror("Error connecting to server", e.reason)
1711
+
1712
+ def _next_tab(self, *e):
1713
+ for i, tab in enumerate(self._tab_names):
1714
+ if tab.lower() == self._tab and i < (len(self._tabs) - 1):
1715
+ self._tab = self._tab_names[i + 1].lower()
1716
+ try:
1717
+ return self._fill_table()
1718
+ except HTTPError as e:
1719
+ showerror("Error reading from server", e)
1720
+ except URLError as e:
1721
+ showerror("Error connecting to server", e.reason)
1722
+
1723
+ def _select_tab(self, event):
1724
+ self._tab = event.widget["text"].lower()
1725
+ try:
1726
+ self._fill_table()
1727
+ except HTTPError as e:
1728
+ showerror("Error reading from server", e)
1729
+ except URLError as e:
1730
+ showerror("Error connecting to server", e.reason)
1731
+
1732
+ _tab = "collections"
1733
+ # _tab = 'corpora'
1734
+ _rows = None
1735
+
1736
+ def _fill_table(self):
1737
+ selected_row = self._table.selected_row()
1738
+ self._table.clear()
1739
+ if self._tab == "all packages":
1740
+ items = self._ds.packages()
1741
+ elif self._tab == "corpora":
1742
+ items = self._ds.corpora()
1743
+ elif self._tab == "models":
1744
+ items = self._ds.models()
1745
+ elif self._tab == "collections":
1746
+ items = self._ds.collections()
1747
+ else:
1748
+ assert 0, "bad tab value %r" % self._tab
1749
+ rows = [self._package_to_columns(item) for item in items]
1750
+ self._table.extend(rows)
1751
+
1752
+ # Highlight the active tab.
1753
+ for tab, label in self._tabs.items():
1754
+ if tab == self._tab:
1755
+ label.configure(
1756
+ foreground=self._FRONT_TAB_COLOR[0],
1757
+ background=self._FRONT_TAB_COLOR[1],
1758
+ )
1759
+ else:
1760
+ label.configure(
1761
+ foreground=self._BACK_TAB_COLOR[0],
1762
+ background=self._BACK_TAB_COLOR[1],
1763
+ )
1764
+
1765
+ self._table.sort_by("Identifier", order="ascending")
1766
+ self._color_table()
1767
+ self._table.select(selected_row)
1768
+
1769
+ # This is a hack, because the scrollbar isn't updating its
1770
+ # position right -- I'm not sure what the underlying cause is
1771
+ # though. (This is on OS X w/ python 2.5) The length of
1772
+ # delay that's necessary seems to depend on how fast the
1773
+ # computer is. :-/
1774
+ self.top.after(150, self._table._scrollbar.set, *self._table._mlb.yview())
1775
+ self.top.after(300, self._table._scrollbar.set, *self._table._mlb.yview())
1776
+
1777
+ def _update_table_status(self):
1778
+ for row_num in range(len(self._table)):
1779
+ status = self._ds.status(self._table[row_num, "Identifier"])
1780
+ self._table[row_num, "Status"] = status
1781
+ self._color_table()
1782
+
1783
+ def _download(self, *e):
1784
+ # If we're using threads, then delegate to the threaded
1785
+ # downloader instead.
1786
+ if self._use_threads:
1787
+ return self._download_threaded(*e)
1788
+
1789
+ marked = [
1790
+ self._table[row, "Identifier"]
1791
+ for row in range(len(self._table))
1792
+ if self._table[row, 0] != ""
1793
+ ]
1794
+ selection = self._table.selected_row()
1795
+ if not marked and selection is not None:
1796
+ marked = [self._table[selection, "Identifier"]]
1797
+
1798
+ download_iter = self._ds.incr_download(marked, self._ds.download_dir)
1799
+ self._log_indent = 0
1800
+ self._download_cb(download_iter, marked)
1801
+
1802
+ _DL_DELAY = 10
1803
+
1804
+ def _download_cb(self, download_iter, ids):
1805
+ try:
1806
+ msg = next(download_iter)
1807
+ except StopIteration:
1808
+ # self._fill_table(sort=False)
1809
+ self._update_table_status()
1810
+ afterid = self.top.after(10, self._show_progress, 0)
1811
+ self._afterid["_download_cb"] = afterid
1812
+ return
1813
+
1814
+ def show(s):
1815
+ self._progresslabel["text"] = s
1816
+ self._log(s)
1817
+
1818
+ if isinstance(msg, ProgressMessage):
1819
+ self._show_progress(msg.progress)
1820
+ elif isinstance(msg, ErrorMessage):
1821
+ show(msg.message)
1822
+ if msg.package is not None:
1823
+ self._select(msg.package.id)
1824
+ self._show_progress(None)
1825
+ return # halt progress.
1826
+ elif isinstance(msg, StartCollectionMessage):
1827
+ show("Downloading collection %s" % msg.collection.id)
1828
+ self._log_indent += 1
1829
+ elif isinstance(msg, StartPackageMessage):
1830
+ show("Downloading package %s" % msg.package.id)
1831
+ elif isinstance(msg, UpToDateMessage):
1832
+ show("Package %s is up-to-date!" % msg.package.id)
1833
+ # elif isinstance(msg, StaleMessage):
1834
+ # show('Package %s is out-of-date or corrupt' % msg.package.id)
1835
+ elif isinstance(msg, FinishDownloadMessage):
1836
+ show("Finished downloading %r." % msg.package.id)
1837
+ elif isinstance(msg, StartUnzipMessage):
1838
+ show("Unzipping %s" % msg.package.filename)
1839
+ elif isinstance(msg, FinishCollectionMessage):
1840
+ self._log_indent -= 1
1841
+ show("Finished downloading collection %r." % msg.collection.id)
1842
+ self._clear_mark(msg.collection.id)
1843
+ elif isinstance(msg, FinishPackageMessage):
1844
+ self._clear_mark(msg.package.id)
1845
+ afterid = self.top.after(self._DL_DELAY, self._download_cb, download_iter, ids)
1846
+ self._afterid["_download_cb"] = afterid
1847
+
1848
+ def _select(self, id):
1849
+ for row in range(len(self._table)):
1850
+ if self._table[row, "Identifier"] == id:
1851
+ self._table.select(row)
1852
+ return
1853
+
1854
+ def _color_table(self):
1855
+ # Color rows according to status.
1856
+ for row in range(len(self._table)):
1857
+ bg, sbg = self._ROW_COLOR[self._table[row, "Status"]]
1858
+ fg, sfg = ("black", "white")
1859
+ self._table.rowconfig(
1860
+ row,
1861
+ foreground=fg,
1862
+ selectforeground=sfg,
1863
+ background=bg,
1864
+ selectbackground=sbg,
1865
+ )
1866
+ # Color the marked column
1867
+ self._table.itemconfigure(
1868
+ row, 0, foreground=self._MARK_COLOR[0], background=self._MARK_COLOR[1]
1869
+ )
1870
+
1871
+ def _clear_mark(self, id):
1872
+ for row in range(len(self._table)):
1873
+ if self._table[row, "Identifier"] == id:
1874
+ self._table[row, 0] = ""
1875
+
1876
+ def _mark_all(self, *e):
1877
+ for row in range(len(self._table)):
1878
+ self._table[row, 0] = "X"
1879
+
1880
+ def _table_mark(self, *e):
1881
+ selection = self._table.selected_row()
1882
+ if selection >= 0:
1883
+ if self._table[selection][0] != "":
1884
+ self._table[selection, 0] = ""
1885
+ else:
1886
+ self._table[selection, 0] = "X"
1887
+ self._table.select(delta=1)
1888
+
1889
+ def _show_log(self):
1890
+ text = "\n".join(self._log_messages)
1891
+ ShowText(self.top, "NLTK Downloader Log", text)
1892
+
1893
+ def _package_to_columns(self, pkg):
1894
+ """
1895
+ Given a package, return a list of values describing that
1896
+ package, one for each column in ``self.COLUMNS``.
1897
+ """
1898
+ row = []
1899
+ for column_index, column_name in enumerate(self.COLUMNS):
1900
+ if column_index == 0: # Mark:
1901
+ row.append("")
1902
+ elif column_name == "Identifier":
1903
+ row.append(pkg.id)
1904
+ elif column_name == "Status":
1905
+ row.append(self._ds.status(pkg))
1906
+ else:
1907
+ attr = column_name.lower().replace(" ", "_")
1908
+ row.append(getattr(pkg, attr, "n/a"))
1909
+ return row
1910
+
1911
+ # /////////////////////////////////////////////////////////////////
1912
+ # External Interface
1913
+ # /////////////////////////////////////////////////////////////////
1914
+
1915
+ def destroy(self, *e):
1916
+ if self._destroyed:
1917
+ return
1918
+ self.top.destroy()
1919
+ self._destroyed = True
1920
+
1921
+ def _destroy(self, *e):
1922
+ if self.top is not None:
1923
+ for afterid in self._afterid.values():
1924
+ self.top.after_cancel(afterid)
1925
+
1926
+ # Abort any download in progress.
1927
+ if self._downloading and self._use_threads:
1928
+ self._abort_download()
1929
+
1930
+ # Make sure the garbage collector destroys these now;
1931
+ # otherwise, they may get destroyed when we're not in the main
1932
+ # thread, which would make Tkinter unhappy.
1933
+ self._column_vars.clear()
1934
+
1935
+ def mainloop(self, *args, **kwargs):
1936
+ self.top.mainloop(*args, **kwargs)
1937
+
1938
+ # /////////////////////////////////////////////////////////////////
1939
+ # HELP
1940
+ # /////////////////////////////////////////////////////////////////
1941
+
1942
+ HELP = textwrap.dedent(
1943
+ """\
1944
+ This tool can be used to download a variety of corpora and models
1945
+ that can be used with NLTK. Each corpus or model is distributed
1946
+ in a single zip file, known as a \"package file.\" You can
1947
+ download packages individually, or you can download pre-defined
1948
+ collections of packages.
1949
+
1950
+ When you download a package, it will be saved to the \"download
+ directory.\" A default download directory is chosen when you run
+ the downloader; but you may also select a different download
+ directory. On Windows, the default download directory is
+ \"package.\"
1958
+
1959
+ The NLTK downloader can be used to download a variety of corpora,
1960
+ models, and other data packages.
1961
+
1962
+ Keyboard shortcuts::
1963
+ [return]\t Download
1964
+ [up]\t Select previous package
1965
+ [down]\t Select next package
1966
+ [left]\t Select previous tab
1967
+ [right]\t Select next tab
1968
+ """
1969
+ )
1970
+
1971
+ def help(self, *e):
1972
+ # The default font's not very legible; try using 'fixed' instead.
1973
+ try:
1974
+ ShowText(
1975
+ self.top,
1976
+ "Help: NLTK Downloader",
1977
+ self.HELP.strip(),
1978
+ width=75,
1979
+ font="fixed",
1980
+ )
1981
+ except:
1982
+ ShowText(self.top, "Help: NLTK Downloader", self.HELP.strip(), width=75)
1983
+
1984
+ def about(self, *e):
1985
+ ABOUT = "NLTK Downloader\n" + "Written by Edward Loper"
1986
+ TITLE = "About: NLTK Downloader"
1987
+ try:
1988
+ from tkinter.messagebox import Message
1989
+
1990
+ Message(message=ABOUT, title=TITLE).show()
1991
+ except ImportError:
1992
+ ShowText(self.top, TITLE, ABOUT)
1993
+
1994
+ # /////////////////////////////////////////////////////////////////
1995
+ # Progress Bar
1996
+ # /////////////////////////////////////////////////////////////////
1997
+
1998
+ _gradient_width = 5
1999
+
2000
+ def _init_progressbar(self):
2001
+ c = self._progressbar
2002
+ width, height = int(c["width"]), int(c["height"])
2003
+ for i in range(0, (int(c["width"]) * 2) // self._gradient_width):
2004
+ c.create_line(
2005
+ i * self._gradient_width + 20,
2006
+ -20,
2007
+ i * self._gradient_width - height - 20,
2008
+ height + 20,
2009
+ width=self._gradient_width,
2010
+ fill="#%02x0000" % (80 + abs(i % 6 - 3) * 12),
2011
+ )
2012
+ c.addtag_all("gradient")
2013
+ c.itemconfig("gradient", state="hidden")
2014
+
2015
+ # This is used to display progress
2016
+ c.addtag_withtag(
2017
+ "redbox", c.create_rectangle(0, 0, 0, 0, fill=self._PROGRESS_COLOR[0])
2018
+ )
2019
+
2020
+ def _show_progress(self, percent):
2021
+ c = self._progressbar
2022
+ if percent is None:
2023
+ c.coords("redbox", 0, 0, 0, 0)
2024
+ c.itemconfig("gradient", state="hidden")
2025
+ else:
2026
+ width, height = int(c["width"]), int(c["height"])
2027
+ x = percent * int(width) // 100 + 1
2028
+ c.coords("redbox", 0, 0, x, height + 1)
2029
+
2030
+ def _progress_alive(self):
2031
+ c = self._progressbar
2032
+ if not self._downloading:
2033
+ c.itemconfig("gradient", state="hidden")
2034
+ else:
2035
+ c.itemconfig("gradient", state="normal")
2036
+ x1, y1, x2, y2 = c.bbox("gradient")
2037
+ if x1 <= -100:
2038
+ c.move("gradient", (self._gradient_width * 6) - 4, 0)
2039
+ else:
2040
+ c.move("gradient", -4, 0)
2041
+ afterid = self.top.after(200, self._progress_alive)
2042
+ self._afterid["_progress_alive"] = afterid
2043
+
2044
+ # /////////////////////////////////////////////////////////////////
2045
+ # Threaded downloader
2046
+ # /////////////////////////////////////////////////////////////////
2047
+
2048
+ def _download_threaded(self, *e):
2049
+ # If the user tries to start a new download while we're already
2050
+ # downloading something, then abort the current download instead.
2051
+ if self._downloading:
2052
+ self._abort_download()
2053
+ return
2054
+
2055
+ # Change the 'download' button to an 'abort' button.
2056
+ self._download_button["text"] = "Cancel"
2057
+
2058
+ marked = [
2059
+ self._table[row, "Identifier"]
2060
+ for row in range(len(self._table))
2061
+ if self._table[row, 0] != ""
2062
+ ]
2063
+ selection = self._table.selected_row()
2064
+ if not marked and selection is not None:
2065
+ marked = [self._table[selection, "Identifier"]]
2066
+
2067
+ # Create a new data server object for the download operation,
2068
+ # just in case the user modifies our data server during the
2069
+ # download (e.g., clicking 'refresh' or editing the index url).
2070
+ ds = Downloader(self._ds.url, self._ds.download_dir)
2071
+
2072
+ # Start downloading in a separate thread.
2073
+ assert self._download_msg_queue == []
2074
+ assert self._download_abort_queue == []
2075
+ self._DownloadThread(
2076
+ ds,
2077
+ marked,
2078
+ self._download_lock,
2079
+ self._download_msg_queue,
2080
+ self._download_abort_queue,
2081
+ ).start()
2082
+
2083
+ # Monitor the download message queue & display its progress.
2084
+ self._log_indent = 0
2085
+ self._downloading = True
2086
+ self._monitor_message_queue()
2087
+
2088
+ # Display an indication that we're still alive and well by
2089
+ # cycling the progress bar.
2090
+ self._progress_alive()
2091
+
2092
+ def _abort_download(self):
2093
+ if self._downloading:
2094
+ self._download_lock.acquire()
2095
+ self._download_abort_queue.append("abort")
2096
+ self._download_lock.release()
2097
+
2098
+ class _DownloadThread(threading.Thread):
2099
+ def __init__(self, data_server, items, lock, message_queue, abort):
2100
+ self.data_server = data_server
2101
+ self.items = items
2102
+ self.lock = lock
2103
+ self.message_queue = message_queue
2104
+ self.abort = abort
2105
+ threading.Thread.__init__(self)
2106
+
2107
+ def run(self):
2108
+ for msg in self.data_server.incr_download(self.items):
2109
+ self.lock.acquire()
2110
+ self.message_queue.append(msg)
2111
+ # Check if we've been told to kill ourselves:
2112
+ if self.abort:
2113
+ self.message_queue.append("aborted")
2114
+ self.lock.release()
2115
+ return
2116
+ self.lock.release()
2117
+ self.lock.acquire()
2118
+ self.message_queue.append("finished")
2119
+ self.lock.release()
2120
+
2121
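The ``_DownloadThread`` worker above and ``_monitor_message_queue`` below form a simple producer/consumer pair: the thread appends messages to a shared list under a lock, and the Tk event loop polls that list with ``after()``. A minimal standalone sketch of the same pattern, with purely illustrative names (not part of NLTK):

import threading
import time

lock = threading.Lock()
messages, abort = [], []

def worker():
    # Stand-in for Downloader.incr_download(): emit progress, honour an abort request.
    for i in range(5):
        time.sleep(0.1)
        with lock:
            if abort:
                messages.append("aborted")
                return
            messages.append("progress %d" % i)
    with lock:
        messages.append("finished")

threading.Thread(target=worker).start()
done = False
while not done:
    time.sleep(0.05)                      # plays the role of top.after(_MONITOR_QUEUE_DELAY, ...)
    with lock:
        drained, messages[:] = list(messages), []
    for msg in drained:
        print(msg)                        # the GUI would update the progress bar / log here
        done = done or msg in ("finished", "aborted")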
+ _MONITOR_QUEUE_DELAY = 100
2122
+
2123
+ def _monitor_message_queue(self):
2124
+ def show(s):
2125
+ self._progresslabel["text"] = s
2126
+ self._log(s)
2127
+
2128
+ # Try to acquire the lock; if it's busy, then just try again later.
2129
+ if not self._download_lock.acquire():
2130
+ return
2131
+ for msg in self._download_msg_queue:
2132
+
2133
+ # Done downloading?
2134
+ if msg == "finished" or msg == "aborted":
2135
+ # self._fill_table(sort=False)
2136
+ self._update_table_status()
2137
+ self._downloading = False
2138
+ self._download_button["text"] = "Download"
2139
+ del self._download_msg_queue[:]
2140
+ del self._download_abort_queue[:]
2141
+ self._download_lock.release()
2142
+ if msg == "aborted":
2143
+ show("Download aborted!")
2144
+ self._show_progress(None)
2145
+ else:
2146
+ afterid = self.top.after(100, self._show_progress, None)
2147
+ self._afterid["_monitor_message_queue"] = afterid
2148
+ return
2149
+
2150
+ # All other messages
2151
+ elif isinstance(msg, ProgressMessage):
2152
+ self._show_progress(msg.progress)
2153
+ elif isinstance(msg, ErrorMessage):
2154
+ show(msg.message)
2155
+ if msg.package is not None:
2156
+ self._select(msg.package.id)
2157
+ self._show_progress(None)
2158
+ self._downloading = False
2159
+ return # halt progress.
2160
+ elif isinstance(msg, StartCollectionMessage):
2161
+ show("Downloading collection %r" % msg.collection.id)
2162
+ self._log_indent += 1
2163
+ elif isinstance(msg, StartPackageMessage):
2164
+ self._ds.clear_status_cache(msg.package.id)
2165
+ show("Downloading package %r" % msg.package.id)
2166
+ elif isinstance(msg, UpToDateMessage):
2167
+ show("Package %s is up-to-date!" % msg.package.id)
2168
+ # elif isinstance(msg, StaleMessage):
2169
+ # show('Package %s is out-of-date or corrupt; updating it' %
2170
+ # msg.package.id)
2171
+ elif isinstance(msg, FinishDownloadMessage):
2172
+ show("Finished downloading %r." % msg.package.id)
2173
+ elif isinstance(msg, StartUnzipMessage):
2174
+ show("Unzipping %s" % msg.package.filename)
2175
+ elif isinstance(msg, FinishUnzipMessage):
2176
+ show("Finished installing %s" % msg.package.id)
2177
+ elif isinstance(msg, FinishCollectionMessage):
2178
+ self._log_indent -= 1
2179
+ show("Finished downloading collection %r." % msg.collection.id)
2180
+ self._clear_mark(msg.collection.id)
2181
+ elif isinstance(msg, FinishPackageMessage):
2182
+ self._update_table_status()
2183
+ self._clear_mark(msg.package.id)
2184
+
2185
+ # Let the user know when we're aborting a download (but
2186
+ # waiting for a good point to abort it, so we don't end up
2187
+ # with a partially unzipped package or anything like that).
2188
+ if self._download_abort_queue:
2189
+ self._progresslabel["text"] = "Aborting download..."
2190
+
2191
+ # Clear the message queue and then release the lock
2192
+ del self._download_msg_queue[:]
2193
+ self._download_lock.release()
2194
+
2195
+ # Check the queue again after MONITOR_QUEUE_DELAY msec.
2196
+ afterid = self.top.after(self._MONITOR_QUEUE_DELAY, self._monitor_message_queue)
2197
+ self._afterid["_monitor_message_queue"] = afterid
2198
+
2199
+
2200
+ ######################################################################
2201
+ # Helper Functions
2202
+ ######################################################################
2203
+ # [xx] It may make sense to move these to nltk.internals.
2204
+
2205
+
2206
+ def md5_hexdigest(file):
2207
+ """
2208
+ Calculate and return the MD5 checksum for a given file.
2209
+ ``file`` may either be a filename or an open stream.
2210
+ """
2211
+ if isinstance(file, str):
2212
+ with open(file, "rb") as infile:
2213
+ return _md5_hexdigest(infile)
2214
+ return _md5_hexdigest(file)
2215
+
2216
+
2217
+ def _md5_hexdigest(fp):
2218
+ md5_digest = md5()
2219
+ while True:
2220
+ block = fp.read(1024 * 16) # 16k blocks
2221
+ if not block:
2222
+ break
2223
+ md5_digest.update(block)
2224
+ return md5_digest.hexdigest()
2225
+
2226
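This helper is what the downloader uses to verify a package file against the ``checksum`` attribute recorded in the index. A minimal usage sketch; the path and expected value below are placeholders:

from nltk.downloader import md5_hexdigest

expected = "0123456789abcdef0123456789abcdef"           # checksum value taken from the index
if md5_hexdigest("/tmp/nltk_data/corpora/treebank.zip") != expected:
    print("Checksum mismatch; the package should be re-downloaded.")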
+
2227
+ # change this to periodically yield progress messages?
2228
+ # [xx] get rid of topdir parameter -- we should be checking
2229
+ # this when we build the index, anyway.
2230
+ def unzip(filename, root, verbose=True):
2231
+ """
2232
+ Extract the contents of the zip file ``filename`` into the
2233
+ directory ``root``.
2234
+ """
2235
+ for message in _unzip_iter(filename, root, verbose):
2236
+ if isinstance(message, ErrorMessage):
2237
+ raise Exception(message)
2238
+
2239
+
2240
+ def _unzip_iter(filename, root, verbose=True):
2241
+ if verbose:
2242
+ sys.stdout.write("Unzipping %s" % os.path.split(filename)[1])
2243
+ sys.stdout.flush()
2244
+
2245
+ try:
2246
+ zf = zipfile.ZipFile(filename)
2247
+ except zipfile.error as e:
2248
+ yield ErrorMessage(filename, "Error with downloaded zip file")
2249
+ return
2250
+ except Exception as e:
2251
+ yield ErrorMessage(filename, e)
2252
+ return
2253
+
2254
+ zf.extractall(root)
2255
+
2256
+ if verbose:
2257
+ print()
2258
+
2259
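``unzip`` simply drives ``_unzip_iter`` and raises if an ``ErrorMessage`` is produced. A usage sketch with a hypothetical package file:

from nltk.downloader import unzip

# Extract a downloaded package zip into its category directory.
unzip("/tmp/nltk_data/corpora/treebank.zip", "/tmp/nltk_data/corpora", verbose=True)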
+
2260
+ ######################################################################
2261
+ # Index Builder
2262
+ ######################################################################
2263
+ # This may move to a different file sometime.
2264
+
2265
+
2266
+ def build_index(root, base_url):
2267
+ """
2268
+ Create a new data.xml index file, by combining the xml description
2269
+ files for various packages and collections. ``root`` should be the
2270
+ path to a directory containing the package xml and zip files; and
2271
+ the collection xml files. The ``root`` directory is expected to
2272
+ have the following subdirectories::
2273
+
2274
+ root/
2275
+ packages/ .................. subdirectory for packages
2276
+ corpora/ ................. zip & xml files for corpora
2277
+ grammars/ ................ zip & xml files for grammars
2278
+ taggers/ ................. zip & xml files for taggers
2279
+ tokenizers/ .............. zip & xml files for tokenizers
2280
+ etc.
2281
+ collections/ ............... xml files for collections
2282
+
2283
+ For each package, there should be two files: ``package.zip``
2284
+ (where *package* is the package name)
2285
+ which contains the package itself as a compressed zip file; and
2286
+ ``package.xml``, which is an xml description of the package. The
2287
+ zipfile ``package.zip`` should expand to a single subdirectory
2288
+ named ``package/``. The base filename ``package`` must match
2289
+ the identifier given in the package's xml file.
2290
+
2291
+ For each collection, there should be a single file ``collection.xml``
2292
+ describing the collection, where *collection* is the name of the collection.
2293
+
2294
+ All identifiers (for both packages and collections) must be unique.
2295
+ """
2296
+ # Find all packages.
2297
+ packages = []
2298
+ for pkg_xml, zf, subdir in _find_packages(os.path.join(root, "packages")):
2299
+ zipstat = os.stat(zf.filename)
2300
+ url = f"{base_url}/{subdir}/{os.path.split(zf.filename)[1]}"
2301
+ unzipped_size = sum(zf_info.file_size for zf_info in zf.infolist())
2302
+
2303
+ # Fill in several fields of the package xml with calculated values.
2304
+ pkg_xml.set("unzipped_size", "%s" % unzipped_size)
2305
+ pkg_xml.set("size", "%s" % zipstat.st_size)
2306
+ pkg_xml.set("checksum", "%s" % md5_hexdigest(zf.filename))
2307
+ pkg_xml.set("subdir", subdir)
2308
+ # pkg_xml.set('svn_revision', _svn_revision(zf.filename))
2309
+ if not pkg_xml.get("url"):
2310
+ pkg_xml.set("url", url)
2311
+
2312
+ # Record the package.
2313
+ packages.append(pkg_xml)
2314
+
2315
+ # Find all collections
2316
+ collections = list(_find_collections(os.path.join(root, "collections")))
2317
+
2318
+ # Check that all UIDs are unique
2319
+ uids = set()
2320
+ for item in packages + collections:
2321
+ if item.get("id") in uids:
2322
+ raise ValueError("Duplicate UID: %s" % item.get("id"))
2323
+ uids.add(item.get("id"))
2324
+
2325
+ # Put it all together
2326
+ top_elt = ElementTree.Element("nltk_data")
2327
+ top_elt.append(ElementTree.Element("packages"))
2328
+ top_elt[0].extend(sorted(packages, key=lambda package: package.get("id")))
2329
+ top_elt.append(ElementTree.Element("collections"))
2330
+ top_elt[1].extend(sorted(collections, key=lambda collection: collection.get("id")))
2331
+
2332
+ _indent_xml(top_elt)
2333
+ return top_elt
2334
+
2335
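``build_index`` returns the assembled ``<nltk_data>`` element; serialising it is left to the caller. A sketch of producing an index file from a local package tree (the directory and URL are placeholders, not NLTK defaults):

from xml.etree import ElementTree
from nltk.downloader import build_index

index = build_index("/path/to/nltk_data_repo", "https://example.org/nltk_data")
with open("index.xml", "wb") as out:
    out.write(ElementTree.tostring(index))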
+
2336
+ def _indent_xml(xml, prefix=""):
2337
+ """
2338
+ Helper for ``build_index()``: Given an XML ``ElementTree``, modify its
2339
+ (and its descendants') ``text`` and ``tail`` attributes to generate
2340
+ an indented tree, where each nested element is indented by 2
2341
+ spaces with respect to its parent.
2342
+ """
2343
+ if len(xml) > 0:
2344
+ xml.text = (xml.text or "").strip() + "\n" + prefix + " "
2345
+ for child in xml:
2346
+ _indent_xml(child, prefix + " ")
2347
+ for child in xml[:-1]:
2348
+ child.tail = (child.tail or "").strip() + "\n" + prefix + " "
2349
+ xml[-1].tail = (xml[-1].tail or "").strip() + "\n" + prefix
2350
+
2351
+
2352
+ def _check_package(pkg_xml, zipfilename, zf):
2353
+ """
2354
+ Helper for ``build_index()``: Perform some checks to make sure that
2355
+ the given package is consistent.
2356
+ """
2357
+ # The filename must match the id given in the XML file.
2358
+ uid = os.path.splitext(os.path.split(zipfilename)[1])[0]
2359
+ if pkg_xml.get("id") != uid:
2360
+ raise ValueError(
2361
+ "package identifier mismatch ({} vs {})".format(pkg_xml.get("id"), uid)
2362
+ )
2363
+
2364
+ # Zip file must expand to a subdir whose name matches uid.
2365
+ if sum((name != uid and not name.startswith(uid + "/")) for name in zf.namelist()):
2366
+ raise ValueError(
2367
+ "Zipfile %s.zip does not expand to a single "
2368
+ "subdirectory %s/" % (uid, uid)
2369
+ )
2370
+
2371
+
2372
+ # update for git?
2373
+ def _svn_revision(filename):
2374
+ """
2375
+ Helper for ``build_index()``: Calculate the subversion revision
2376
+ number for a given file (by using ``subprocess`` to run ``svn``).
2377
+ """
2378
+ p = subprocess.Popen(
2379
+ ["svn", "status", "-v", filename],
2380
+ stdout=subprocess.PIPE,
2381
+ stderr=subprocess.PIPE,
2382
+ )
2383
+ (stdout, stderr) = p.communicate()
2384
+ if p.returncode != 0 or stderr or not stdout:
2385
+ raise ValueError(
2386
+ "Error determining svn_revision for %s: %s"
2387
+ % (os.path.split(filename)[1], textwrap.fill(stderr))
2388
+ )
2389
+ return stdout.split()[2]
2390
+
2391
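Picking up the ``update for git?`` note above, a git-based equivalent could look roughly like the following; this is only a sketch and is not part of NLTK:

import subprocess

def _git_revision(filename):
    """Return the hash of the last commit that touched ``filename``."""
    result = subprocess.run(
        ["git", "log", "-n", "1", "--format=%H", "--", filename],
        capture_output=True, text=True, check=True,
    )
    revision = result.stdout.strip()
    if not revision:
        raise ValueError("No git history found for %s" % filename)
    return revision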
+
2392
+ def _find_collections(root):
2393
+ """
2394
+ Helper for ``build_index()``: Yield a list of ElementTree.Element
2395
+ objects, each holding the xml for a single package collection.
2396
+ """
2397
+ for dirname, _subdirs, files in os.walk(root):
2398
+ for filename in files:
2399
+ if filename.endswith(".xml"):
2400
+ xmlfile = os.path.join(dirname, filename)
2401
+ yield ElementTree.parse(xmlfile).getroot()
2402
+
2403
+
2404
+ def _find_packages(root):
2405
+ """
2406
+ Helper for ``build_index()``: Yield a list of tuples
2407
+ ``(pkg_xml, zf, subdir)``, where:
2408
+ - ``pkg_xml`` is an ``ElementTree.Element`` holding the xml for a
2409
+ package
2410
+ - ``zf`` is a ``zipfile.ZipFile`` for the package's contents.
2411
+ - ``subdir`` is the subdirectory (relative to ``root``) where
2412
+ the package was found (e.g. 'corpora' or 'grammars').
2413
+ """
2414
+ from nltk.corpus.reader.util import _path_from
2415
+
2416
+ # Find all packages.
2417
+ packages = []
2418
+ for dirname, subdirs, files in os.walk(root):
2419
+ relpath = "/".join(_path_from(root, dirname))
2420
+ for filename in files:
2421
+ if filename.endswith(".xml"):
2422
+ xmlfilename = os.path.join(dirname, filename)
2423
+ zipfilename = xmlfilename[:-4] + ".zip"
2424
+ try:
2425
+ zf = zipfile.ZipFile(zipfilename)
2426
+ except Exception as e:
2427
+ raise ValueError(f"Error reading file {zipfilename!r}!\n{e}") from e
2428
+ try:
2429
+ pkg_xml = ElementTree.parse(xmlfilename).getroot()
2430
+ except Exception as e:
2431
+ raise ValueError(f"Error reading file {xmlfilename!r}!\n{e}") from e
2432
+
2433
+ # Check that the UID matches the filename
2434
+ uid = os.path.split(xmlfilename[:-4])[1]
2435
+ if pkg_xml.get("id") != uid:
2436
+ raise ValueError(
2437
+ "package identifier mismatch (%s "
2438
+ "vs %s)" % (pkg_xml.get("id"), uid)
2439
+ )
2440
+
2441
+ # Check that the zipfile expands to a subdir whose
2442
+ # name matches the uid.
2443
+ if sum(
2444
+ (name != uid and not name.startswith(uid + "/"))
2445
+ for name in zf.namelist()
2446
+ ):
2447
+ raise ValueError(
2448
+ "Zipfile %s.zip does not expand to a "
2449
+ "single subdirectory %s/" % (uid, uid)
2450
+ )
2451
+
2452
+ yield pkg_xml, zf, relpath
2453
+
2454
+ elif filename.endswith(".zip"):
2455
+ # Warn user in case a .xml does not exist for a .zip
2456
+ resourcename = os.path.splitext(filename)[0]
2457
+ xmlfilename = os.path.join(dirname, resourcename + ".xml")
2458
+ if not os.path.exists(xmlfilename):
2459
+ warnings.warn(
2460
+ f"{filename} exists, but {resourcename + '.xml'} cannot be found! "
2461
+ f"This could mean that {resourcename} can not be downloaded.",
2462
+ stacklevel=2,
2463
+ )
2464
+
2465
+ # Don't recurse into svn subdirectories:
2466
+ try:
2467
+ subdirs.remove(".svn")
2468
+ except ValueError:
2469
+ pass
2470
+
2471
+
2472
+ ######################################################################
2473
+ # Main:
2474
+ ######################################################################
2475
+
2476
+ # There should be a command-line interface
2477
+
2478
+ # Aliases
2479
+ _downloader = Downloader()
2480
+ download = _downloader.download
2481
+
2482
+
2483
+ def download_shell():
2484
+ DownloaderShell(_downloader).run()
2485
+
2486
+
2487
+ def download_gui():
2488
+ DownloaderGUI(_downloader).mainloop()
2489
+
2490
+
2491
+ def update():
2492
+ _downloader.update()
2493
+
2494
+
2495
+ if __name__ == "__main__":
2496
+ from optparse import OptionParser
2497
+
2498
+ parser = OptionParser()
2499
+ parser.add_option(
2500
+ "-d",
2501
+ "--dir",
2502
+ dest="dir",
2503
+ help="download package to directory DIR",
2504
+ metavar="DIR",
2505
+ )
2506
+ parser.add_option(
2507
+ "-q",
2508
+ "--quiet",
2509
+ dest="quiet",
2510
+ action="store_true",
2511
+ default=False,
2512
+ help="work quietly",
2513
+ )
2514
+ parser.add_option(
2515
+ "-f",
2516
+ "--force",
2517
+ dest="force",
2518
+ action="store_true",
2519
+ default=False,
2520
+ help="download even if already installed",
2521
+ )
2522
+ parser.add_option(
2523
+ "-e",
2524
+ "--exit-on-error",
2525
+ dest="halt_on_error",
2526
+ action="store_true",
2527
+ default=False,
2528
+ help="exit if an error occurs",
2529
+ )
2530
+ parser.add_option(
2531
+ "-u",
2532
+ "--url",
2533
+ dest="server_index_url",
2534
+ default=os.environ.get("NLTK_DOWNLOAD_URL"),
2535
+ help="download server index url",
2536
+ )
2537
+
2538
+ (options, args) = parser.parse_args()
2539
+
2540
+ downloader = Downloader(server_index_url=options.server_index_url)
2541
+
2542
+ if args:
2543
+ for pkg_id in args:
2544
+ rv = downloader.download(
2545
+ info_or_id=pkg_id,
2546
+ download_dir=options.dir,
2547
+ quiet=options.quiet,
2548
+ force=options.force,
2549
+ halt_on_error=options.halt_on_error,
2550
+ )
2551
+ if rv == False and options.halt_on_error:
2552
+ break
2553
+ else:
2554
+ downloader.download(
2555
+ download_dir=options.dir,
2556
+ quiet=options.quiet,
2557
+ force=options.force,
2558
+ halt_on_error=options.halt_on_error,
2559
+ )
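The option parser above backs the ``python -m nltk.downloader`` entry point, and the module-level ``download()`` alias gives the same functionality programmatically. Typical invocations; the download directory below is just an example:

# Shell:  python -m nltk.downloader punkt
#         python -m nltk.downloader -d /tmp/nltk_data -q punkt treebank
import nltk

nltk.download("punkt")                                               # default download directory
nltk.download("treebank", download_dir="/tmp/nltk_data", quiet=True)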
llmeval-env/lib/python3.10/site-packages/nltk/featstruct.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/nltk/grammar.py ADDED
@@ -0,0 +1,1708 @@
1
+ # Natural Language Toolkit: Context Free Grammars
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # Jason Narad <[email protected]>
7
+ # Peter Ljunglöf <[email protected]>
8
+ # Tom Aarsen <>
9
+ # URL: <https://www.nltk.org/>
10
+ # For license information, see LICENSE.TXT
11
+ #
12
+
13
+ """
14
+ Basic data classes for representing context free grammars. A
15
+ "grammar" specifies which trees can represent the structure of a
16
+ given text. Each of these trees is called a "parse tree" for the
17
+ text (or simply a "parse"). In a "context free" grammar, the set of
18
+ parse trees for any piece of a text can depend only on that piece, and
19
+ not on the rest of the text (i.e., the piece's context). Context free
20
+ grammars are often used to find possible syntactic structures for
21
+ sentences. In this context, the leaves of a parse tree are word
22
+ tokens; and the node values are phrasal categories, such as ``NP``
23
+ and ``VP``.
24
+
25
+ The ``CFG`` class is used to encode context free grammars. Each
26
+ ``CFG`` consists of a start symbol and a set of productions.
27
+ The "start symbol" specifies the root node value for parse trees. For example,
28
+ the start symbol for syntactic parsing is usually ``S``. Start
29
+ symbols are encoded using the ``Nonterminal`` class, which is discussed
30
+ below.
31
+
32
+ A Grammar's "productions" specify what parent-child relationships a parse
33
+ tree can contain. Each production specifies that a particular
34
+ node can be the parent of a particular set of children. For example,
35
+ the production ``<S> -> <NP> <VP>`` specifies that an ``S`` node can
36
+ be the parent of an ``NP`` node and a ``VP`` node.
37
+
38
+ Grammar productions are implemented by the ``Production`` class.
39
+ Each ``Production`` consists of a left hand side and a right hand
40
+ side. The "left hand side" is a ``Nonterminal`` that specifies the
41
+ node type for a potential parent; and the "right hand side" is a list
42
+ that specifies allowable children for that parent. This list
43
+ consists of ``Nonterminals`` and text types: each ``Nonterminal``
44
+ indicates that the corresponding child may be a ``TreeToken`` with the
45
+ specified node type; and each text type indicates that the
46
+ corresponding child may be a ``Token`` with that type.
47
+
48
+ The ``Nonterminal`` class is used to distinguish node values from leaf
49
+ values. This prevents the grammar from accidentally using a leaf
50
+ value (such as the English word "A") as the node of a subtree. Within
51
+ a ``CFG``, all node values are wrapped in the ``Nonterminal``
52
+ class. Note, however, that the trees that are specified by the grammar do
53
+ *not* include these ``Nonterminal`` wrappers.
54
+
55
+ Grammars can also be given a more procedural interpretation. According to
56
+ this interpretation, a Grammar specifies any tree structure *tree* that
57
+ can be produced by the following procedure:
58
+
59
+ | Set tree to the start symbol
60
+ | Repeat until tree contains no more nonterminal leaves:
61
+ | Choose a production prod whose left hand side
62
+ | lhs is a nonterminal leaf of tree.
63
+ | Replace the nonterminal leaf with a subtree, whose node
64
+ | value is the value wrapped by the nonterminal lhs, and
65
+ | whose children are the right hand side of prod.
66
+
67
+ The operation of replacing the left hand side (*lhs*) of a production
68
+ with the right hand side (*rhs*) in a tree (*tree*) is known as
69
+ "expanding" *lhs* to *rhs* in *tree*.
70
+ """
71
+ import re
72
+ from functools import total_ordering
73
+
74
+ from nltk.featstruct import SLASH, TYPE, FeatDict, FeatStruct, FeatStructReader
75
+ from nltk.internals import raise_unorderable_types
76
+ from nltk.probability import ImmutableProbabilisticMixIn
77
+ from nltk.util import invert_graph, transitive_closure
78
+
79
+ #################################################################
80
+ # Nonterminal
81
+ #################################################################
82
+
83
+
84
+ @total_ordering
85
+ class Nonterminal:
86
+ """
87
+ A non-terminal symbol for a context free grammar. ``Nonterminal``
88
+ is a wrapper class for node values; it is used by ``Production``
89
+ objects to distinguish node values from leaf values.
90
+ The node value that is wrapped by a ``Nonterminal`` is known as its
91
+ "symbol". Symbols are typically strings representing phrasal
92
+ categories (such as ``"NP"`` or ``"VP"``). However, more complex
93
+ symbol types are sometimes used (e.g., for lexicalized grammars).
94
+ Since symbols are node values, they must be immutable and
95
+ hashable. Two ``Nonterminals`` are considered equal if their
96
+ symbols are equal.
97
+
98
+ :see: ``CFG``, ``Production``
99
+ :type _symbol: any
100
+ :ivar _symbol: The node value corresponding to this
101
+ ``Nonterminal``. This value must be immutable and hashable.
102
+ """
103
+
104
+ def __init__(self, symbol):
105
+ """
106
+ Construct a new non-terminal from the given symbol.
107
+
108
+ :type symbol: any
109
+ :param symbol: The node value corresponding to this
110
+ ``Nonterminal``. This value must be immutable and
111
+ hashable.
112
+ """
113
+ self._symbol = symbol
114
+
115
+ def symbol(self):
116
+ """
117
+ Return the node value corresponding to this ``Nonterminal``.
118
+
119
+ :rtype: (any)
120
+ """
121
+ return self._symbol
122
+
123
+ def __eq__(self, other):
124
+ """
125
+ Return True if this non-terminal is equal to ``other``. In
126
+ particular, return True if ``other`` is a ``Nonterminal``
127
+ and this non-terminal's symbol is equal to ``other`` 's symbol.
128
+
129
+ :rtype: bool
130
+ """
131
+ return type(self) == type(other) and self._symbol == other._symbol
132
+
133
+ def __ne__(self, other):
134
+ return not self == other
135
+
136
+ def __lt__(self, other):
137
+ if not isinstance(other, Nonterminal):
138
+ raise_unorderable_types("<", self, other)
139
+ return self._symbol < other._symbol
140
+
141
+ def __hash__(self):
142
+ return hash(self._symbol)
143
+
144
+ def __repr__(self):
145
+ """
146
+ Return a string representation for this ``Nonterminal``.
147
+
148
+ :rtype: str
149
+ """
150
+ if isinstance(self._symbol, str):
151
+ return "%s" % self._symbol
152
+ else:
153
+ return "%s" % repr(self._symbol)
154
+
155
+ def __str__(self):
156
+ """
157
+ Return a string representation for this ``Nonterminal``.
158
+
159
+ :rtype: str
160
+ """
161
+ if isinstance(self._symbol, str):
162
+ return "%s" % self._symbol
163
+ else:
164
+ return "%s" % repr(self._symbol)
165
+
166
+ def __div__(self, rhs):
167
+ """
168
+ Return a new nonterminal whose symbol is ``A/B``, where ``A`` is
169
+ the symbol for this nonterminal, and ``B`` is the symbol for rhs.
170
+
171
+ :param rhs: The nonterminal used to form the right hand side
172
+ of the new nonterminal.
173
+ :type rhs: Nonterminal
174
+ :rtype: Nonterminal
175
+ """
176
+ return Nonterminal(f"{self._symbol}/{rhs._symbol}")
177
+
178
+ def __truediv__(self, rhs):
179
+ """
180
+ Return a new nonterminal whose symbol is ``A/B``, where ``A`` is
181
+ the symbol for this nonterminal, and ``B`` is the symbol for rhs.
182
+ This function allows use of the slash ``/`` operator with
183
+ the future import of division.
184
+
185
+ :param rhs: The nonterminal used to form the right hand side
186
+ of the new nonterminal.
187
+ :type rhs: Nonterminal
188
+ :rtype: Nonterminal
189
+ """
190
+ return self.__div__(rhs)
191
+
192
+
193
+ def nonterminals(symbols):
194
+ """
195
+ Given a string containing a list of symbol names, return a list of
196
+ ``Nonterminals`` constructed from those symbols.
197
+
198
+ :param symbols: The symbol name string. This string can be
199
+ delimited by either spaces or commas.
200
+ :type symbols: str
201
+ :return: A list of ``Nonterminals`` constructed from the symbol
202
+ names given in ``symbols``. The ``Nonterminals`` are sorted
203
+ in the same order as the symbols names.
204
+ :rtype: list(Nonterminal)
205
+ """
206
+ if "," in symbols:
207
+ symbol_list = symbols.split(",")
208
+ else:
209
+ symbol_list = symbols.split()
210
+ return [Nonterminal(s.strip()) for s in symbol_list]
211
+
212
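A quick illustration of ``nonterminals()`` together with the ``/`` operator defined on ``Nonterminal`` above:

from nltk.grammar import Nonterminal, nonterminals

S, NP, VP, PP = nonterminals("S, NP, VP, PP")
assert S == Nonterminal("S")
print(VP / PP)        # VP/PP -- a new composite Nonterminal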
+
213
+ class FeatStructNonterminal(FeatDict, Nonterminal):
214
+ """A feature structure that's also a nonterminal. It acts as its
215
+ own symbol, and automatically freezes itself when hashed."""
216
+
217
+ def __hash__(self):
218
+ self.freeze()
219
+ return FeatStruct.__hash__(self)
220
+
221
+ def symbol(self):
222
+ return self
223
+
224
+
225
+ def is_nonterminal(item):
226
+ """
227
+ :return: True if the item is a ``Nonterminal``.
228
+ :rtype: bool
229
+ """
230
+ return isinstance(item, Nonterminal)
231
+
232
+
233
+ #################################################################
234
+ # Terminals
235
+ #################################################################
236
+
237
+
238
+ def is_terminal(item):
239
+ """
240
+ Return True if the item is a terminal, which is currently the case
241
+ if it is hashable and not a ``Nonterminal``.
242
+
243
+ :rtype: bool
244
+ """
245
+ return hasattr(item, "__hash__") and not isinstance(item, Nonterminal)
246
+
247
+
248
+ #################################################################
249
+ # Productions
250
+ #################################################################
251
+
252
+
253
+ @total_ordering
254
+ class Production:
255
+ """
256
+ A grammar production. Each production maps a single symbol
257
+ on the "left-hand side" to a sequence of symbols on the
258
+ "right-hand side". (In the case of context-free productions,
259
+ the left-hand side must be a ``Nonterminal``, and the right-hand
260
+ side is a sequence of terminals and ``Nonterminals``.)
261
+ "terminals" can be any immutable hashable object that is
262
+ not a ``Nonterminal``. Typically, terminals are strings
263
+ representing words, such as ``"dog"`` or ``"under"``.
264
+
265
+ :see: ``CFG``
266
+ :see: ``DependencyGrammar``
267
+ :see: ``Nonterminal``
268
+ :type _lhs: Nonterminal
269
+ :ivar _lhs: The left-hand side of the production.
270
+ :type _rhs: tuple(Nonterminal, terminal)
271
+ :ivar _rhs: The right-hand side of the production.
272
+ """
273
+
274
+ def __init__(self, lhs, rhs):
275
+ """
276
+ Construct a new ``Production``.
277
+
278
+ :param lhs: The left-hand side of the new ``Production``.
279
+ :type lhs: Nonterminal
280
+ :param rhs: The right-hand side of the new ``Production``.
281
+ :type rhs: sequence(Nonterminal and terminal)
282
+ """
283
+ if isinstance(rhs, str):
284
+ raise TypeError(
285
+ "production right hand side should be a list, " "not a string"
286
+ )
287
+ self._lhs = lhs
288
+ self._rhs = tuple(rhs)
289
+
290
+ def lhs(self):
291
+ """
292
+ Return the left-hand side of this ``Production``.
293
+
294
+ :rtype: Nonterminal
295
+ """
296
+ return self._lhs
297
+
298
+ def rhs(self):
299
+ """
300
+ Return the right-hand side of this ``Production``.
301
+
302
+ :rtype: sequence(Nonterminal and terminal)
303
+ """
304
+ return self._rhs
305
+
306
+ def __len__(self):
307
+ """
308
+ Return the length of the right-hand side.
309
+
310
+ :rtype: int
311
+ """
312
+ return len(self._rhs)
313
+
314
+ def is_nonlexical(self):
315
+ """
316
+ Return True if the right-hand side only contains ``Nonterminals``
317
+
318
+ :rtype: bool
319
+ """
320
+ return all(is_nonterminal(n) for n in self._rhs)
321
+
322
+ def is_lexical(self):
323
+ """
324
+ Return True if the right-hand side contains at least one terminal token.
325
+
326
+ :rtype: bool
327
+ """
328
+ return not self.is_nonlexical()
329
+
330
+ def __str__(self):
331
+ """
332
+ Return a verbose string representation of the ``Production``.
333
+
334
+ :rtype: str
335
+ """
336
+ result = "%s -> " % repr(self._lhs)
337
+ result += " ".join(repr(el) for el in self._rhs)
338
+ return result
339
+
340
+ def __repr__(self):
341
+ """
342
+ Return a concise string representation of the ``Production``.
343
+
344
+ :rtype: str
345
+ """
346
+ return "%s" % self
347
+
348
+ def __eq__(self, other):
349
+ """
350
+ Return True if this ``Production`` is equal to ``other``.
351
+
352
+ :rtype: bool
353
+ """
354
+ return (
355
+ type(self) == type(other)
356
+ and self._lhs == other._lhs
357
+ and self._rhs == other._rhs
358
+ )
359
+
360
+ def __ne__(self, other):
361
+ return not self == other
362
+
363
+ def __lt__(self, other):
364
+ if not isinstance(other, Production):
365
+ raise_unorderable_types("<", self, other)
366
+ return (self._lhs, self._rhs) < (other._lhs, other._rhs)
367
+
368
+ def __hash__(self):
369
+ """
370
+ Return a hash value for the ``Production``.
371
+
372
+ :rtype: int
373
+ """
374
+ return hash((self._lhs, self._rhs))
375
+
376
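A ``Production`` pairs a ``Nonterminal`` left-hand side with a sequence of terminals and nonterminals; a bare string right-hand side is rejected by the constructor above. For example:

from nltk.grammar import Nonterminal, Production

S, NP, VP = (Nonterminal(s) for s in ("S", "NP", "VP"))
p1 = Production(S, [NP, VP])           # S -> NP VP
p2 = Production(NP, ["the", "dog"])    # lexical production
assert p1.is_nonlexical() and p2.is_lexical()
print(p1)                              # S -> NP VP
print(p2)                              # NP -> 'the' 'dog'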
+
377
+ class DependencyProduction(Production):
378
+ """
379
+ A dependency grammar production. Each production maps a single
380
+ head word to an unordered list of one or more modifier words.
381
+ """
382
+
383
+ def __str__(self):
384
+ """
385
+ Return a verbose string representation of the ``DependencyProduction``.
386
+
387
+ :rtype: str
388
+ """
389
+ result = f"'{self._lhs}' ->"
390
+ for elt in self._rhs:
391
+ result += f" '{elt}'"
392
+ return result
393
+
394
+
395
+ class ProbabilisticProduction(Production, ImmutableProbabilisticMixIn):
396
+ """
397
+ A probabilistic context free grammar production.
398
+ A PCFG ``ProbabilisticProduction`` is essentially just a ``Production`` that
399
+ has an associated probability, which represents how likely it is that
400
+ this production will be used. In particular, the probability of a
401
+ ``ProbabilisticProduction`` records the likelihood that its right-hand side is
402
+ the correct instantiation for any given occurrence of its left-hand side.
403
+
404
+ :see: ``Production``
405
+ """
406
+
407
+ def __init__(self, lhs, rhs, **prob):
408
+ """
409
+ Construct a new ``ProbabilisticProduction``.
410
+
411
+ :param lhs: The left-hand side of the new ``ProbabilisticProduction``.
412
+ :type lhs: Nonterminal
413
+ :param rhs: The right-hand side of the new ``ProbabilisticProduction``.
414
+ :type rhs: sequence(Nonterminal and terminal)
415
+ :param prob: Probability parameters of the new ``ProbabilisticProduction``.
416
+ """
417
+ ImmutableProbabilisticMixIn.__init__(self, **prob)
418
+ Production.__init__(self, lhs, rhs)
419
+
420
+ def __str__(self):
421
+ return super().__str__() + (
422
+ " [1.0]" if (self.prob() == 1.0) else " [%g]" % self.prob()
423
+ )
424
+
425
+ def __eq__(self, other):
426
+ return (
427
+ type(self) == type(other)
428
+ and self._lhs == other._lhs
429
+ and self._rhs == other._rhs
430
+ and self.prob() == other.prob()
431
+ )
432
+
433
+ def __ne__(self, other):
434
+ return not self == other
435
+
436
+ def __hash__(self):
437
+ return hash((self._lhs, self._rhs, self.prob()))
438
+
439
+
440
+ #################################################################
441
+ # Grammars
442
+ #################################################################
443
+
444
+
445
+ class CFG:
446
+ """
447
+ A context-free grammar. A grammar consists of a start state and
448
+ a set of productions. The set of terminals and nonterminals is
449
+ implicitly specified by the productions.
450
+
451
+ If you need efficient key-based access to productions, you
452
+ can use a subclass to implement it.
453
+ """
454
+
455
+ def __init__(self, start, productions, calculate_leftcorners=True):
456
+ """
457
+ Create a new context-free grammar, from the given start state
458
+ and set of ``Production`` instances.
459
+
460
+ :param start: The start symbol
461
+ :type start: Nonterminal
462
+ :param productions: The list of productions that defines the grammar
463
+ :type productions: list(Production)
464
+ :param calculate_leftcorners: False if we don't want to calculate the
465
+ leftcorner relation. In that case, some optimized chart parsers won't work.
466
+ :type calculate_leftcorners: bool
467
+ """
468
+ if not is_nonterminal(start):
469
+ raise TypeError(
470
+ "start should be a Nonterminal object,"
471
+ " not a %s" % type(start).__name__
472
+ )
473
+
474
+ self._start = start
475
+ self._productions = productions
476
+ self._categories = {prod.lhs() for prod in productions}
477
+ self._calculate_indexes()
478
+ self._calculate_grammar_forms()
479
+ if calculate_leftcorners:
480
+ self._calculate_leftcorners()
481
+
482
+ def _calculate_indexes(self):
483
+ self._lhs_index = {}
484
+ self._rhs_index = {}
485
+ self._empty_index = {}
486
+ self._lexical_index = {}
487
+ for prod in self._productions:
488
+ # Left hand side.
489
+ lhs = prod._lhs
490
+ if lhs not in self._lhs_index:
491
+ self._lhs_index[lhs] = []
492
+ self._lhs_index[lhs].append(prod)
493
+ if prod._rhs:
494
+ # First item in right hand side.
495
+ rhs0 = prod._rhs[0]
496
+ if rhs0 not in self._rhs_index:
497
+ self._rhs_index[rhs0] = []
498
+ self._rhs_index[rhs0].append(prod)
499
+ else:
500
+ # The right hand side is empty.
501
+ self._empty_index[prod.lhs()] = prod
502
+ # Lexical tokens in the right hand side.
503
+ for token in prod._rhs:
504
+ if is_terminal(token):
505
+ self._lexical_index.setdefault(token, set()).add(prod)
506
+
507
+ def _calculate_leftcorners(self):
508
+ # Calculate leftcorner relations, for use in optimized parsing.
509
+ self._immediate_leftcorner_categories = {cat: {cat} for cat in self._categories}
510
+ self._immediate_leftcorner_words = {cat: set() for cat in self._categories}
511
+ for prod in self.productions():
512
+ if len(prod) > 0:
513
+ cat, left = prod.lhs(), prod.rhs()[0]
514
+ if is_nonterminal(left):
515
+ self._immediate_leftcorner_categories[cat].add(left)
516
+ else:
517
+ self._immediate_leftcorner_words[cat].add(left)
518
+
519
+ lc = transitive_closure(self._immediate_leftcorner_categories, reflexive=True)
520
+ self._leftcorners = lc
521
+ self._leftcorner_parents = invert_graph(lc)
522
+
523
+ nr_leftcorner_categories = sum(
524
+ map(len, self._immediate_leftcorner_categories.values())
525
+ )
526
+ nr_leftcorner_words = sum(map(len, self._immediate_leftcorner_words.values()))
527
+ if nr_leftcorner_words > nr_leftcorner_categories > 10000:
528
+ # If the grammar is big, the leftcorner-word dictionary will be too large.
529
+ # In that case it is better to calculate the relation on demand.
530
+ self._leftcorner_words = None
531
+ return
532
+
533
+ self._leftcorner_words = {}
534
+ for cat in self._leftcorners:
535
+ lefts = self._leftcorners[cat]
536
+ lc = self._leftcorner_words[cat] = set()
537
+ for left in lefts:
538
+ lc.update(self._immediate_leftcorner_words.get(left, set()))
539
+
540
+ @classmethod
541
+ def fromstring(cls, input, encoding=None):
542
+ """
543
+ Return the grammar instance corresponding to the input string(s).
544
+
545
+ :param input: a grammar, either in the form of a string or as a list of strings.
546
+ """
547
+ start, productions = read_grammar(
548
+ input, standard_nonterm_parser, encoding=encoding
549
+ )
550
+ return cls(start, productions)
551
+
552
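``fromstring`` is the usual way to construct a grammar; a small example in the notation accepted by ``read_grammar``:

from nltk import CFG

grammar = CFG.fromstring("""
    S -> NP VP
    NP -> Det N | 'I'
    VP -> V NP
    Det -> 'the' | 'a'
    N -> 'dog' | 'cat'
    V -> 'saw' | 'chased'
""")
print(grammar.start())               # S
print(len(grammar.productions()))    # 10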
+ def start(self):
553
+ """
554
+ Return the start symbol of the grammar
555
+
556
+ :rtype: Nonterminal
557
+ """
558
+ return self._start
559
+
560
+ # tricky to balance readability and efficiency here!
561
+ # can't use set operations as they don't preserve ordering
562
+ def productions(self, lhs=None, rhs=None, empty=False):
563
+ """
564
+ Return the grammar productions, filtered by the left-hand side
565
+ or the first item in the right-hand side.
566
+
567
+ :param lhs: Only return productions with the given left-hand side.
568
+ :param rhs: Only return productions with the given first item
569
+ in the right-hand side.
570
+ :param empty: Only return productions with an empty right-hand side.
571
+ :return: A list of productions matching the given constraints.
572
+ :rtype: list(Production)
573
+ """
574
+ if rhs and empty:
575
+ raise ValueError(
576
+ "You cannot select empty and non-empty " "productions at the same time."
577
+ )
578
+
579
+ # no constraints so return everything
580
+ if not lhs and not rhs:
581
+ if not empty:
582
+ return self._productions
583
+ else:
584
+ return self._empty_index.values()
585
+
586
+ # only lhs specified so look up its index
587
+ elif lhs and not rhs:
588
+ if not empty:
589
+ return self._lhs_index.get(lhs, [])
590
+ elif lhs in self._empty_index:
591
+ return [self._empty_index[lhs]]
592
+ else:
593
+ return []
594
+
595
+ # only rhs specified so look up its index
596
+ elif rhs and not lhs:
597
+ return self._rhs_index.get(rhs, [])
598
+
599
+ # intersect
600
+ else:
601
+ return [
602
+ prod
603
+ for prod in self._lhs_index.get(lhs, [])
604
+ if prod in self._rhs_index.get(rhs, [])
605
+ ]
606
+
607
+ def leftcorners(self, cat):
608
+ """
609
+ Return the set of all nonterminals that the given nonterminal
610
+ can start with, including itself.
611
+
612
+ This is the reflexive, transitive closure of the immediate
613
+ leftcorner relation: (A > B) iff (A -> B beta)
614
+
615
+ :param cat: the parent of the leftcorners
616
+ :type cat: Nonterminal
617
+ :return: the set of all leftcorners
618
+ :rtype: set(Nonterminal)
619
+ """
620
+ return self._leftcorners.get(cat, {cat})
621
+
622
+ def is_leftcorner(self, cat, left):
623
+ """
624
+ True if left is a leftcorner of cat, where left can be a
625
+ terminal or a nonterminal.
626
+
627
+ :param cat: the parent of the leftcorner
628
+ :type cat: Nonterminal
629
+ :param left: the suggested leftcorner
630
+ :type left: Terminal or Nonterminal
631
+ :rtype: bool
632
+ """
633
+ if is_nonterminal(left):
634
+ return left in self.leftcorners(cat)
635
+ elif self._leftcorner_words:
636
+ return left in self._leftcorner_words.get(cat, set())
637
+ else:
638
+ return any(
639
+ left in self._immediate_leftcorner_words.get(parent, set())
640
+ for parent in self.leftcorners(cat)
641
+ )
642
+
643
+ def leftcorner_parents(self, cat):
644
+ """
645
+ Return the set of all nonterminals for which the given category
646
+ is a left corner. This is the inverse of the leftcorner relation.
647
+
648
+ :param cat: the suggested leftcorner
649
+ :type cat: Nonterminal
650
+ :return: the set of all parents to the leftcorner
651
+ :rtype: set(Nonterminal)
652
+ """
653
+ return self._leftcorner_parents.get(cat, {cat})
654
+
655
+ def check_coverage(self, tokens):
656
+ """
657
+ Check whether the grammar rules cover the given list of tokens.
658
+ If not, then raise an exception.
659
+
660
+ :type tokens: list(str)
661
+ """
662
+ missing = [tok for tok in tokens if not self._lexical_index.get(tok)]
663
+ if missing:
664
+ missing = ", ".join(f"{w!r}" for w in missing)
665
+ raise ValueError(
666
+ "Grammar does not cover some of the " "input words: %r." % missing
667
+ )
668
+
669
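``check_coverage`` is a convenient guard before parsing; words with no lexical production raise ``ValueError``:

from nltk import CFG

g = CFG.fromstring("""
    S -> Det N
    Det -> 'the'
    N -> 'dog' | 'cat'
""")
g.check_coverage(["the", "dog"])            # passes silently
try:
    g.check_coverage(["the", "platypus"])
except ValueError as err:
    print(err)                              # Grammar does not cover some of the input words: ...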
+ def _calculate_grammar_forms(self):
670
+ """
671
+ Pre-calculate of which form(s) the grammar is.
672
+ """
673
+ prods = self._productions
674
+ self._is_lexical = all(p.is_lexical() for p in prods)
675
+ self._is_nonlexical = all(p.is_nonlexical() for p in prods if len(p) != 1)
676
+ self._min_len = min(len(p) for p in prods)
677
+ self._max_len = max(len(p) for p in prods)
678
+ self._all_unary_are_lexical = all(p.is_lexical() for p in prods if len(p) == 1)
679
+
680
+ def is_lexical(self):
681
+ """
682
+ Return True if all productions are lexicalised.
683
+ """
684
+ return self._is_lexical
685
+
686
+ def is_nonlexical(self):
687
+ """
688
+ Return True if all lexical rules are "preterminals", that is,
689
+ unary rules which can be separated in a preprocessing step.
690
+
691
+ This means that all productions are of the forms
692
+ A -> B1 ... Bn (n>=0), or A -> "s".
693
+
694
+ Note: is_lexical() and is_nonlexical() are not opposites.
695
+ There are grammars which are neither, and grammars which are both.
696
+ """
697
+ return self._is_nonlexical
698
+
699
+ def min_len(self):
700
+ """
701
+ Return the right-hand side length of the shortest grammar production.
702
+ """
703
+ return self._min_len
704
+
705
+ def max_len(self):
706
+ """
707
+ Return the right-hand side length of the longest grammar production.
708
+ """
709
+ return self._max_len
710
+
711
+ def is_nonempty(self):
712
+ """
713
+ Return True if there are no empty productions.
714
+ """
715
+ return self._min_len > 0
716
+
717
+ def is_binarised(self):
718
+ """
719
+ Return True if all productions are at most binary.
720
+ Note that there can still be empty and unary productions.
721
+ """
722
+ return self._max_len <= 2
723
+
724
+ def is_flexible_chomsky_normal_form(self):
725
+ """
726
+ Return True if all productions are of the forms
727
+ A -> B C, A -> B, or A -> "s".
728
+ """
729
+ return self.is_nonempty() and self.is_nonlexical() and self.is_binarised()
730
+
731
+ def is_chomsky_normal_form(self):
732
+ """
733
+ Return True if the grammar is of Chomsky Normal Form, i.e. all productions
734
+ are of the form A -> B C, or A -> "s".
735
+ """
736
+ return self.is_flexible_chomsky_normal_form() and self._all_unary_are_lexical
737
+
738
+ def chomsky_normal_form(self, new_token_padding="@$@", flexible=False):
739
+ """
740
+ Returns a new grammar that is in Chomsky Normal Form.
741
+
742
+ :param new_token_padding:
743
+ Customise new rule formation during binarisation
744
+ """
745
+ if self.is_chomsky_normal_form():
746
+ return self
747
+ if self.productions(empty=True):
748
+ raise ValueError(
749
+ "Grammar has Empty rules. " "Cannot deal with them at the moment"
750
+ )
751
+
752
+ # check for mixed rules
753
+ for rule in self.productions():
754
+ if rule.is_lexical() and len(rule.rhs()) > 1:
755
+ raise ValueError(
756
+ f"Cannot handled mixed rule {rule.lhs()} => {rule.rhs()}"
757
+ )
758
+
759
+ step1 = CFG.eliminate_start(self)
760
+ step2 = CFG.binarize(step1, new_token_padding)
761
+ if flexible:
762
+ return step2
763
+ step3 = CFG.remove_unitary_rules(step2)
764
+ step4 = CFG(step3.start(), list(set(step3.productions())))
765
+ return step4
766
+
767
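A sketch of the conversion on a small grammar; the exact names of the auxiliary nonterminals depend on ``new_token_padding``:

from nltk import CFG

g = CFG.fromstring("""
    S -> NP VP PP
    NP -> 'dogs'
    VP -> 'bark'
    PP -> 'loudly'
""")
cnf = g.chomsky_normal_form()
print(cnf.is_chomsky_normal_form())    # True
for prod in cnf.productions():
    print(prod)                        # e.g. S -> NP S@$@NP, S@$@NP -> VP PP, NP -> 'dogs', ...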
+ @classmethod
768
+ def remove_unitary_rules(cls, grammar):
769
+ """
770
+ Remove nonlexical unitary rules and convert them to
771
+ lexical
772
+ """
773
+ result = []
774
+ unitary = []
775
+ for rule in grammar.productions():
776
+ if len(rule) == 1 and rule.is_nonlexical():
777
+ unitary.append(rule)
778
+ else:
779
+ result.append(rule)
780
+
781
+ while unitary:
782
+ rule = unitary.pop(0)
783
+ for item in grammar.productions(lhs=rule.rhs()[0]):
784
+ new_rule = Production(rule.lhs(), item.rhs())
785
+ if len(new_rule) != 1 or new_rule.is_lexical():
786
+ result.append(new_rule)
787
+ else:
788
+ unitary.append(new_rule)
789
+
790
+ n_grammar = CFG(grammar.start(), result)
791
+ return n_grammar
792
+
793
+ @classmethod
794
+ def binarize(cls, grammar, padding="@$@"):
795
+ """
796
+ Convert all non-binary rules into binary by introducing
797
+ new tokens.
798
+ Example::
799
+
800
+ Original:
801
+ A => B C D
802
+ After Conversion:
803
+ A => B A@$@B
804
+ A@$@B => C D
805
+ """
806
+ result = []
807
+
808
+ for rule in grammar.productions():
809
+ if len(rule.rhs()) > 2:
810
+ # this rule needs to be broken down
811
+ left_side = rule.lhs()
812
+ for k in range(0, len(rule.rhs()) - 2):
813
+ tsym = rule.rhs()[k]
814
+ new_sym = Nonterminal(left_side.symbol() + padding + tsym.symbol())
815
+ new_production = Production(left_side, (tsym, new_sym))
816
+ left_side = new_sym
817
+ result.append(new_production)
818
+ last_prd = Production(left_side, rule.rhs()[-2:])
819
+ result.append(last_prd)
820
+ else:
821
+ result.append(rule)
822
+
823
+ n_grammar = CFG(grammar.start(), result)
824
+ return n_grammar
825
+
826
+ @classmethod
827
+ def eliminate_start(cls, grammar):
828
+ """
829
+ Eliminate start rule in case it appears on RHS
830
+ Example: S -> S0 S1 and S0 -> S1 S
831
+ Then another rule S0_SIGMA -> S is added
832
+ """
833
+ start = grammar.start()
834
+ result = []
835
+ need_to_add = None
836
+ for rule in grammar.productions():
837
+ if start in rule.rhs():
838
+ need_to_add = True
839
+ result.append(rule)
840
+ if need_to_add:
841
+ start = Nonterminal("S0_SIGMA")
842
+ result.append(Production(start, [grammar.start()]))
843
+ n_grammar = CFG(start, result)
844
+ return n_grammar
845
+ return grammar
846
+
847
+ def __repr__(self):
848
+ return "<Grammar with %d productions>" % len(self._productions)
849
+
850
+ def __str__(self):
851
+ result = "Grammar with %d productions" % len(self._productions)
852
+ result += " (start state = %r)" % self._start
853
+ for production in self._productions:
854
+ result += "\n %s" % production
855
+ return result
856
+
857
+
858
+ class FeatureGrammar(CFG):
859
+ """
860
+ A feature-based grammar. This is equivalent to a
861
+ ``CFG`` whose nonterminals are all
862
+ ``FeatStructNonterminal``.
863
+
864
+ A grammar consists of a start state and a set of
865
+ productions. The set of terminals and nonterminals
866
+ is implicitly specified by the productions.
867
+ """
868
+
869
+ def __init__(self, start, productions):
870
+ """
871
+ Create a new feature-based grammar, from the given start
872
+ state and set of ``Productions``.
873
+
874
+ :param start: The start symbol
875
+ :type start: FeatStructNonterminal
876
+ :param productions: The list of productions that defines the grammar
877
+ :type productions: list(Production)
878
+ """
879
+ CFG.__init__(self, start, productions)
880
+
881
+ # The difference with CFG is that the productions are
882
+ # indexed on the TYPE feature of the nonterminals.
883
+ # This is calculated by the method _get_type_if_possible().
884
+
885
+ def _calculate_indexes(self):
886
+ self._lhs_index = {}
887
+ self._rhs_index = {}
888
+ self._empty_index = {}
889
+ self._empty_productions = []
890
+ self._lexical_index = {}
891
+ for prod in self._productions:
892
+ # Left hand side.
893
+ lhs = self._get_type_if_possible(prod._lhs)
894
+ if lhs not in self._lhs_index:
895
+ self._lhs_index[lhs] = []
896
+ self._lhs_index[lhs].append(prod)
897
+ if prod._rhs:
898
+ # First item in right hand side.
899
+ rhs0 = self._get_type_if_possible(prod._rhs[0])
900
+ if rhs0 not in self._rhs_index:
901
+ self._rhs_index[rhs0] = []
902
+ self._rhs_index[rhs0].append(prod)
903
+ else:
904
+ # The right hand side is empty.
905
+ if lhs not in self._empty_index:
906
+ self._empty_index[lhs] = []
907
+ self._empty_index[lhs].append(prod)
908
+ self._empty_productions.append(prod)
909
+ # Lexical tokens in the right hand side.
910
+ for token in prod._rhs:
911
+ if is_terminal(token):
912
+ self._lexical_index.setdefault(token, set()).add(prod)
913
+
914
+ @classmethod
915
+ def fromstring(
916
+ cls, input, features=None, logic_parser=None, fstruct_reader=None, encoding=None
917
+ ):
918
+ """
919
+ Return a feature structure based grammar.
920
+
921
+ :param input: a grammar, either in the form of a string or else
922
+ as a list of strings.
923
+ :param features: a tuple of features (default: SLASH, TYPE)
924
+ :param logic_parser: a parser for lambda-expressions,
925
+ by default, ``LogicParser()``
926
+ :param fstruct_reader: a feature structure parser
927
+ (only if features and logic_parser is None)
928
+ """
929
+ if features is None:
930
+ features = (SLASH, TYPE)
931
+
932
+ if fstruct_reader is None:
933
+ fstruct_reader = FeatStructReader(
934
+ features, FeatStructNonterminal, logic_parser=logic_parser
935
+ )
936
+ elif logic_parser is not None:
937
+ raise Exception(
938
+ "'logic_parser' and 'fstruct_reader' must " "not both be set"
939
+ )
940
+
941
+ start, productions = read_grammar(
942
+ input, fstruct_reader.read_partial, encoding=encoding
943
+ )
944
+ return cls(start, productions)
945
+
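A minimal sketch of reading a feature-based grammar with ``fromstring``; the three-rule agreement grammar below is purely illustrative, and ``FeatureGrammar`` is imported from ``nltk.grammar``, where it is defined.

>>> from nltk.grammar import FeatureGrammar
>>> fg = FeatureGrammar.fromstring("""
... S -> NP[NUM=?n] VP[NUM=?n]
... NP[NUM=sg] -> 'John'
... VP[NUM=sg] -> 'walks'
... """)
>>> len(fg.productions())
3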
946
+ def productions(self, lhs=None, rhs=None, empty=False):
947
+ """
948
+ Return the grammar productions, filtered by the left-hand side
949
+ or the first item in the right-hand side.
950
+
951
+ :param lhs: Only return productions with the given left-hand side.
952
+ :param rhs: Only return productions with the given first item
953
+ in the right-hand side.
954
+ :param empty: Only return productions with an empty right-hand side.
955
+ :rtype: list(Production)
956
+ """
957
+ if rhs and empty:
958
+ raise ValueError(
959
+ "You cannot select empty and non-empty " "productions at the same time."
960
+ )
961
+
962
+ # no constraints so return everything
963
+ if not lhs and not rhs:
964
+ if empty:
965
+ return self._empty_productions
966
+ else:
967
+ return self._productions
968
+
969
+ # only lhs specified so look up its index
970
+ elif lhs and not rhs:
971
+ if empty:
972
+ return self._empty_index.get(self._get_type_if_possible(lhs), [])
973
+ else:
974
+ return self._lhs_index.get(self._get_type_if_possible(lhs), [])
975
+
976
+ # only rhs specified so look up its index
977
+ elif rhs and not lhs:
978
+ return self._rhs_index.get(self._get_type_if_possible(rhs), [])
979
+
980
+ # intersect
981
+ else:
982
+ return [
983
+ prod
984
+ for prod in self._lhs_index.get(self._get_type_if_possible(lhs), [])
985
+ if prod in self._rhs_index.get(self._get_type_if_possible(rhs), [])
986
+ ]
987
+
988
+ def leftcorners(self, cat):
989
+ """
990
+ Return the set of all words that the given category can start with.
991
+ Also called the "first set" in compiler construction.
992
+ """
993
+ raise NotImplementedError("Not implemented yet")
994
+
995
+ def leftcorner_parents(self, cat):
996
+ """
997
+ Return the set of all categories for which the given category
998
+ is a left corner.
999
+ """
1000
+ raise NotImplementedError("Not implemented yet")
1001
+
1002
+ def _get_type_if_possible(self, item):
1003
+ """
1004
+ Helper function which returns the ``TYPE`` feature of the ``item``,
1005
+ if it exists, otherwise it returns the ``item`` itself
1006
+ """
1007
+ if isinstance(item, dict) and TYPE in item:
1008
+ return FeatureValueType(item[TYPE])
1009
+ else:
1010
+ return item
1011
+
1012
+
1013
+ @total_ordering
1014
+ class FeatureValueType:
1015
+ """
1016
+ A helper class for ``FeatureGrammars``, designed to be different
1017
+ from ordinary strings. This is to stop the ``FeatStruct``
1018
+ ``FOO[]`` from comparing equal to the terminal "FOO".
1019
+ """
1020
+
1021
+ def __init__(self, value):
1022
+ self._value = value
1023
+
1024
+ def __repr__(self):
1025
+ return "<%s>" % self._value
1026
+
1027
+ def __eq__(self, other):
1028
+ return type(self) == type(other) and self._value == other._value
1029
+
1030
+ def __ne__(self, other):
1031
+ return not self == other
1032
+
1033
+ def __lt__(self, other):
1034
+ if not isinstance(other, FeatureValueType):
1035
+ raise_unorderable_types("<", self, other)
1036
+ return self._value < other._value
1037
+
1038
+ def __hash__(self):
1039
+ return hash(self._value)
1040
+
1041
+
1042
+ class DependencyGrammar:
1043
+ """
1044
+ A dependency grammar. A DependencyGrammar consists of a set of
1045
+ productions. Each production specifies a head/modifier relationship
1046
+ between a pair of words.
1047
+ """
1048
+
1049
+ def __init__(self, productions):
1050
+ """
1051
+ Create a new dependency grammar, from the set of ``Productions``.
1052
+
1053
+ :param productions: The list of productions that defines the grammar
1054
+ :type productions: list(Production)
1055
+ """
1056
+ self._productions = productions
1057
+
1058
+ @classmethod
1059
+ def fromstring(cls, input):
1060
+ productions = []
1061
+ for linenum, line in enumerate(input.split("\n")):
1062
+ line = line.strip()
1063
+ if line.startswith("#") or line == "":
1064
+ continue
1065
+ try:
1066
+ productions += _read_dependency_production(line)
1067
+ except ValueError as e:
1068
+ raise ValueError(f"Unable to parse line {linenum}: {line}") from e
1069
+ if len(productions) == 0:
1070
+ raise ValueError("No productions found!")
1071
+ return cls(productions)
1072
+
1073
+ def contains(self, head, mod):
1074
+ """
1075
+ :param head: A head word.
1076
+ :type head: str
1077
+ :param mod: A mod word, to test as a modifier of 'head'.
1078
+ :type mod: str
1079
+
1080
+ :return: true if this ``DependencyGrammar`` contains a
1081
+ ``DependencyProduction`` mapping 'head' to 'mod'.
1082
+ :rtype: bool
1083
+ """
1084
+ for production in self._productions:
1085
+ for possibleMod in production._rhs:
1086
+ if production._lhs == head and possibleMod == mod:
1087
+ return True
1088
+ return False
1089
+
1090
+ def __contains__(self, head_mod):
1091
+ """
1092
+ Return True if this ``DependencyGrammar`` contains a
1093
+ ``DependencyProduction`` mapping 'head' to 'mod'.
1094
+
1095
+ :param head_mod: A tuple of a head word and a mod word,
1096
+ to test as a modifier of 'head'.
1097
+ :type head: Tuple[str, str]
1098
+ :rtype: bool
1099
+ """
1100
+ try:
1101
+ head, mod = head_mod
1102
+ except ValueError as e:
1103
+ raise ValueError(
1104
+ "Must use a tuple of strings, e.g. `('price', 'of') in grammar`"
1105
+ ) from e
1106
+ return self.contains(head, mod)
1107
+
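A hedged sketch of the tuple-based membership test; the toy dependency grammar is an assumption made up for illustration.

>>> from nltk import DependencyGrammar
>>> dg = DependencyGrammar.fromstring("""
... 'taught' -> 'play' | 'man'
... 'man' -> 'the'
... """)
>>> ('taught', 'man') in dg
True
>>> ('man', 'play') in dg
False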
1108
+ # # should be rewritten, the set comp won't work in all comparisons
1109
+ # def contains_exactly(self, head, modlist):
1110
+ # for production in self._productions:
1111
+ # if(len(production._rhs) == len(modlist)):
1112
+ # if(production._lhs == head):
1113
+ # set1 = Set(production._rhs)
1114
+ # set2 = Set(modlist)
1115
+ # if(set1 == set2):
1116
+ # return True
1117
+ # return False
1118
+
1119
+ def __str__(self):
1120
+ """
1121
+ Return a verbose string representation of the ``DependencyGrammar``
1122
+
1123
+ :rtype: str
1124
+ """
1125
+ str = "Dependency grammar with %d productions" % len(self._productions)
1126
+ for production in self._productions:
1127
+ str += "\n %s" % production
1128
+ return str
1129
+
1130
+ def __repr__(self):
1131
+ """
1132
+ Return a concise string representation of the ``DependencyGrammar``
1133
+ """
1134
+ return "Dependency grammar with %d productions" % len(self._productions)
1135
+
1136
+
1137
+ class ProbabilisticDependencyGrammar:
1138
+ """ """
1139
+
1140
+ def __init__(self, productions, events, tags):
1141
+ self._productions = productions
1142
+ self._events = events
1143
+ self._tags = tags
1144
+
1145
+ def contains(self, head, mod):
1146
+ """
1147
+ Return True if this ``DependencyGrammar`` contains a
1148
+ ``DependencyProduction`` mapping 'head' to 'mod'.
1149
+
1150
+ :param head: A head word.
1151
+ :type head: str
1152
+ :param mod: A mod word, to test as a modifier of 'head'.
1153
+ :type mod: str
1154
+ :rtype: bool
1155
+ """
1156
+ for production in self._productions:
1157
+ for possibleMod in production._rhs:
1158
+ if production._lhs == head and possibleMod == mod:
1159
+ return True
1160
+ return False
1161
+
1162
+ def __str__(self):
1163
+ """
1164
+ Return a verbose string representation of the ``ProbabilisticDependencyGrammar``
1165
+
1166
+ :rtype: str
1167
+ """
1168
+ str = "Statistical dependency grammar with %d productions" % len(
1169
+ self._productions
1170
+ )
1171
+ for production in self._productions:
1172
+ str += "\n %s" % production
1173
+ str += "\nEvents:"
1174
+ for event in self._events:
1175
+ str += "\n %d:%s" % (self._events[event], event)
1176
+ str += "\nTags:"
1177
+ for tag_word in self._tags:
1178
+ str += f"\n {tag_word}:\t({self._tags[tag_word]})"
1179
+ return str
1180
+
1181
+ def __repr__(self):
1182
+ """
1183
+ Return a concise string representation of the ``ProbabilisticDependencyGrammar``
1184
+ """
1185
+ return "Statistical Dependency grammar with %d productions" % len(
1186
+ self._productions
1187
+ )
1188
+
1189
+
1190
+ class PCFG(CFG):
1191
+ """
1192
+ A probabilistic context-free grammar. A PCFG consists of a
1193
+ start state and a set of productions with probabilities. The set of
1194
+ terminals and nonterminals is implicitly specified by the productions.
1195
+
1196
+ PCFG productions use the ``ProbabilisticProduction`` class.
1197
+ ``PCFGs`` impose the constraint that the set of productions with
1198
+ any given left-hand-side must have probabilities that sum to 1
1199
+ (allowing for a small margin of error).
1200
+
1201
+ If you need efficient key-based access to productions, you can use
1202
+ a subclass to implement it.
1203
+
1204
+ :type EPSILON: float
1205
+ :cvar EPSILON: The acceptable margin of error for checking that
1206
+ productions with a given left-hand side have probabilities
1207
+ that sum to 1.
1208
+ """
1209
+
1210
+ EPSILON = 0.01
1211
+
1212
+ def __init__(self, start, productions, calculate_leftcorners=True):
1213
+ """
1214
+ Create a new context-free grammar, from the given start state
1215
+ and set of ``ProbabilisticProductions``.
1216
+
1217
+ :param start: The start symbol
1218
+ :type start: Nonterminal
1219
+ :param productions: The list of productions that defines the grammar
1220
+ :type productions: list(Production)
1221
+ :raise ValueError: if the set of productions with any left-hand-side
1222
+ do not have probabilities that sum to a value within
1223
+ EPSILON of 1.
1224
+ :param calculate_leftcorners: False if we don't want to calculate the
1225
+ leftcorner relation. In that case, some optimized chart parsers won't work.
1226
+ :type calculate_leftcorners: bool
1227
+ """
1228
+ CFG.__init__(self, start, productions, calculate_leftcorners)
1229
+
1230
+ # Make sure that the probabilities sum to one.
1231
+ probs = {}
1232
+ for production in productions:
1233
+ probs[production.lhs()] = probs.get(production.lhs(), 0) + production.prob()
1234
+ for (lhs, p) in probs.items():
1235
+ if not ((1 - PCFG.EPSILON) < p < (1 + PCFG.EPSILON)):
1236
+ raise ValueError("Productions for %r do not sum to 1" % lhs)
1237
+
1238
+ @classmethod
1239
+ def fromstring(cls, input, encoding=None):
1240
+ """
1241
+ Return a probabilistic context-free grammar corresponding to the
1242
+ input string(s).
1243
+
1244
+ :param input: a grammar, either in the form of a string or else
1245
+ as a list of strings.
1246
+ """
1247
+ start, productions = read_grammar(
1248
+ input, standard_nonterm_parser, probabilistic=True, encoding=encoding
1249
+ )
1250
+ return cls(start, productions)
1251
+
1252
+
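A short sketch of the sum-to-1 constraint described in the class docstring, using assumed toy grammars: probabilities for each left-hand side must sum to 1 within ``EPSILON``, otherwise construction fails.

>>> from nltk import PCFG
>>> g = PCFG.fromstring("S -> 'a' [0.4] | 'b' [0.6]")
>>> g.productions()[0].prob()
0.4
>>> PCFG.fromstring("S -> 'a' [0.3] | 'b' [0.3]")
Traceback (most recent call last):
  ...
ValueError: Productions for S do not sum to 1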
1253
+ #################################################################
1254
+ # Inducing Grammars
1255
+ #################################################################
1256
+
1257
+ # Contributed by Nathan Bodenstab <[email protected]>
1258
+
1259
+
1260
+ def induce_pcfg(start, productions):
1261
+ r"""
1262
+ Induce a PCFG grammar from a list of productions.
1263
+
1264
+ The probability of a production A -> B C in a PCFG is:
1265
+
1266
+ | count(A -> B C)
1267
+ | P(B, C | A) = --------------- where \* is any right hand side
1268
+ | count(A -> \*)
1269
+
1270
+ :param start: The start symbol
1271
+ :type start: Nonterminal
1272
+ :param productions: The list of productions that defines the grammar
1273
+ :type productions: list(Production)
1274
+ """
1275
+ # Production count: the number of times a given production occurs
1276
+ pcount = {}
1277
+
1278
+ # LHS-count: counts the number of times a given lhs occurs
1279
+ lcount = {}
1280
+
1281
+ for prod in productions:
1282
+ lcount[prod.lhs()] = lcount.get(prod.lhs(), 0) + 1
1283
+ pcount[prod] = pcount.get(prod, 0) + 1
1284
+
1285
+ prods = [
1286
+ ProbabilisticProduction(p.lhs(), p.rhs(), prob=pcount[p] / lcount[p.lhs()])
1287
+ for p in pcount
1288
+ ]
1289
+ return PCFG(start, prods)
1290
+
1291
+
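A hedged, minimal sketch of ``induce_pcfg`` on a hand-built production list (relative-frequency estimation per left-hand side); the symbols and words are assumptions for illustration only.

>>> from nltk import Nonterminal, Production, induce_pcfg
>>> S, NP, VP = Nonterminal('S'), Nonterminal('NP'), Nonterminal('VP')
>>> prods = [Production(S, [NP, VP]),
...          Production(NP, ['John']),
...          Production(NP, ['Mary'])]
>>> g = induce_pcfg(S, prods)
>>> [p.prob() for p in g.productions() if p.lhs() == NP]
[0.5, 0.5]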
1292
+ #################################################################
1293
+ # Helper functions for reading productions
1294
+ #################################################################
1295
+
1296
+
1297
+ def _read_cfg_production(input):
1298
+ """
1299
+ Return a list of context-free ``Productions``.
1300
+ """
1301
+ return _read_production(input, standard_nonterm_parser)
1302
+
1303
+
1304
+ def _read_pcfg_production(input):
1305
+ """
1306
+ Return a list of PCFG ``ProbabilisticProductions``.
1307
+ """
1308
+ return _read_production(input, standard_nonterm_parser, probabilistic=True)
1309
+
1310
+
1311
+ def _read_fcfg_production(input, fstruct_reader):
1312
+ """
1313
+ Return a list of feature-based ``Productions``.
1314
+ """
1315
+ return _read_production(input, fstruct_reader)
1316
+
1317
+
1318
+ # Parsing generic grammars
1319
+
1320
+ _ARROW_RE = re.compile(r"\s* -> \s*", re.VERBOSE)
1321
+ _PROBABILITY_RE = re.compile(r"( \[ [\d\.]+ \] ) \s*", re.VERBOSE)
1322
+ _TERMINAL_RE = re.compile(r'( "[^"]*" | \'[^\']*\' ) \s*', re.VERBOSE)
1323
+ _DISJUNCTION_RE = re.compile(r"\| \s*", re.VERBOSE)
1324
+
1325
+
1326
+ def _read_production(line, nonterm_parser, probabilistic=False):
1327
+ """
1328
+ Parse a grammar rule, given as a string, and return
1329
+ a list of productions.
1330
+ """
1331
+ pos = 0
1332
+
1333
+ # Parse the left-hand side.
1334
+ lhs, pos = nonterm_parser(line, pos)
1335
+
1336
+ # Skip over the arrow.
1337
+ m = _ARROW_RE.match(line, pos)
1338
+ if not m:
1339
+ raise ValueError("Expected an arrow")
1340
+ pos = m.end()
1341
+
1342
+ # Parse the right hand side.
1343
+ probabilities = [0.0]
1344
+ rhsides = [[]]
1345
+ while pos < len(line):
1346
+ # Probability.
1347
+ m = _PROBABILITY_RE.match(line, pos)
1348
+ if probabilistic and m:
1349
+ pos = m.end()
1350
+ probabilities[-1] = float(m.group(1)[1:-1])
1351
+ if probabilities[-1] > 1.0:
1352
+ raise ValueError(
1353
+ "Production probability %f, "
1354
+ "should not be greater than 1.0" % (probabilities[-1],)
1355
+ )
1356
+
1357
+ # String -- add terminal.
1358
+ elif line[pos] in "'\"":
1359
+ m = _TERMINAL_RE.match(line, pos)
1360
+ if not m:
1361
+ raise ValueError("Unterminated string")
1362
+ rhsides[-1].append(m.group(1)[1:-1])
1363
+ pos = m.end()
1364
+
1365
+ # Vertical bar -- start new rhside.
1366
+ elif line[pos] == "|":
1367
+ m = _DISJUNCTION_RE.match(line, pos)
1368
+ probabilities.append(0.0)
1369
+ rhsides.append([])
1370
+ pos = m.end()
1371
+
1372
+ # Anything else -- nonterminal.
1373
+ else:
1374
+ nonterm, pos = nonterm_parser(line, pos)
1375
+ rhsides[-1].append(nonterm)
1376
+
1377
+ if probabilistic:
1378
+ return [
1379
+ ProbabilisticProduction(lhs, rhs, prob=probability)
1380
+ for (rhs, probability) in zip(rhsides, probabilities)
1381
+ ]
1382
+ else:
1383
+ return [Production(lhs, rhs) for rhs in rhsides]
1384
+
1385
+
1386
+ #################################################################
1387
+ # Reading Phrase Structure Grammars
1388
+ #################################################################
1389
+
1390
+
1391
+ def read_grammar(input, nonterm_parser, probabilistic=False, encoding=None):
1392
+ """
1393
+ Return a pair consisting of a starting category and a list of
1394
+ ``Productions``.
1395
+
1396
+ :param input: a grammar, either in the form of a string or else
1397
+ as a list of strings.
1398
+ :param nonterm_parser: a function for parsing nonterminals.
1399
+ It should take a ``(string, position)`` as argument and
1400
+ return a ``(nonterminal, position)`` as result.
1401
+ :param probabilistic: are the grammar rules probabilistic?
1402
+ :type probabilistic: bool
1403
+ :param encoding: the encoding of the grammar, if it is a binary string
1404
+ :type encoding: str
1405
+ """
1406
+ if encoding is not None:
1407
+ input = input.decode(encoding)
1408
+ if isinstance(input, str):
1409
+ lines = input.split("\n")
1410
+ else:
1411
+ lines = input
1412
+
1413
+ start = None
1414
+ productions = []
1415
+ continue_line = ""
1416
+ for linenum, line in enumerate(lines):
1417
+ line = continue_line + line.strip()
1418
+ if line.startswith("#") or line == "":
1419
+ continue
1420
+ if line.endswith("\\"):
1421
+ continue_line = line[:-1].rstrip() + " "
1422
+ continue
1423
+ continue_line = ""
1424
+ try:
1425
+ if line[0] == "%":
1426
+ directive, args = line[1:].split(None, 1)
1427
+ if directive == "start":
1428
+ start, pos = nonterm_parser(args, 0)
1429
+ if pos != len(args):
1430
+ raise ValueError("Bad argument to start directive")
1431
+ else:
1432
+ raise ValueError("Bad directive")
1433
+ else:
1434
+ # expand out the disjunctions on the RHS
1435
+ productions += _read_production(line, nonterm_parser, probabilistic)
1436
+ except ValueError as e:
1437
+ raise ValueError(f"Unable to parse line {linenum + 1}: {line}\n{e}") from e
1438
+
1439
+ if not productions:
1440
+ raise ValueError("No productions found!")
1441
+ if not start:
1442
+ start = productions[0].lhs()
1443
+ return (start, productions)
1444
+
1445
+
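A small illustrative sketch of the ``%start`` directive and comment handling, using the ``standard_nonterm_parser`` defined just below; the toy grammar is an assumption.

>>> from nltk.grammar import read_grammar, standard_nonterm_parser
>>> start, prods = read_grammar("""
... # toy grammar
... %start VP
... VP -> V NP
... V -> 'eat'
... NP -> 'apples'
... """, standard_nonterm_parser)
>>> start
VP
>>> len(prods)
3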
1446
+ _STANDARD_NONTERM_RE = re.compile(r"( [\w/][\w/^<>-]* ) \s*", re.VERBOSE)
1447
+
1448
+
1449
+ def standard_nonterm_parser(string, pos):
1450
+ m = _STANDARD_NONTERM_RE.match(string, pos)
1451
+ if not m:
1452
+ raise ValueError("Expected a nonterminal, found: " + string[pos:])
1453
+ return (Nonterminal(m.group(1)), m.end())
1454
+
1455
+
1456
+ #################################################################
1457
+ # Reading Dependency Grammars
1458
+ #################################################################
1459
+
1460
+ _READ_DG_RE = re.compile(
1461
+ r"""^\s* # leading whitespace
1462
+ ('[^']+')\s* # single-quoted lhs
1463
+ (?:[-=]+>)\s* # arrow
1464
+ (?:( # rhs:
1465
+ "[^"]+" # doubled-quoted terminal
1466
+ | '[^']+' # single-quoted terminal
1467
+ | \| # disjunction
1468
+ )
1469
+ \s*) # trailing space
1470
+ *$""", # zero or more copies
1471
+ re.VERBOSE,
1472
+ )
1473
+ _SPLIT_DG_RE = re.compile(r"""('[^']'|[-=]+>|"[^"]+"|'[^']+'|\|)""")
1474
+
1475
+
1476
+ def _read_dependency_production(s):
1477
+ if not _READ_DG_RE.match(s):
1478
+ raise ValueError("Bad production string")
1479
+ pieces = _SPLIT_DG_RE.split(s)
1480
+ pieces = [p for i, p in enumerate(pieces) if i % 2 == 1]
1481
+ lhside = pieces[0].strip("'\"")
1482
+ rhsides = [[]]
1483
+ for piece in pieces[2:]:
1484
+ if piece == "|":
1485
+ rhsides.append([])
1486
+ else:
1487
+ rhsides[-1].append(piece.strip("'\""))
1488
+ return [DependencyProduction(lhside, rhside) for rhside in rhsides]
1489
+
1490
+
1491
+ #################################################################
1492
+ # Demonstration
1493
+ #################################################################
1494
+
1495
+
1496
+ def cfg_demo():
1497
+ """
1498
+ A demonstration showing how ``CFGs`` can be created and used.
1499
+ """
1500
+
1501
+ from nltk import CFG, Production, nonterminals
1502
+
1503
+ # Create some nonterminals
1504
+ S, NP, VP, PP = nonterminals("S, NP, VP, PP")
1505
+ N, V, P, Det = nonterminals("N, V, P, Det")
1506
+ VP_slash_NP = VP / NP
1507
+
1508
+ print("Some nonterminals:", [S, NP, VP, PP, N, V, P, Det, VP / NP])
1509
+ print(" S.symbol() =>", repr(S.symbol()))
1510
+ print()
1511
+
1512
+ print(Production(S, [NP]))
1513
+
1514
+ # Create some Grammar Productions
1515
+ grammar = CFG.fromstring(
1516
+ """
1517
+ S -> NP VP
1518
+ PP -> P NP
1519
+ NP -> Det N | NP PP
1520
+ VP -> V NP | VP PP
1521
+ Det -> 'a' | 'the'
1522
+ N -> 'dog' | 'cat'
1523
+ V -> 'chased' | 'sat'
1524
+ P -> 'on' | 'in'
1525
+ """
1526
+ )
1527
+
1528
+ print("A Grammar:", repr(grammar))
1529
+ print(" grammar.start() =>", repr(grammar.start()))
1530
+ print(" grammar.productions() =>", end=" ")
1531
+ # Use string.replace(...) to line-wrap the output.
1532
+ print(repr(grammar.productions()).replace(",", ",\n" + " " * 25))
1533
+ print()
1534
+
1535
+
1536
+ def pcfg_demo():
1537
+ """
1538
+ A demonstration showing how a ``PCFG`` can be created and used.
1539
+ """
1540
+
1541
+ from nltk import induce_pcfg, treetransforms
1542
+ from nltk.corpus import treebank
1543
+ from nltk.parse import pchart
1544
+
1545
+ toy_pcfg1 = PCFG.fromstring(
1546
+ """
1547
+ S -> NP VP [1.0]
1548
+ NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
1549
+ Det -> 'the' [0.8] | 'my' [0.2]
1550
+ N -> 'man' [0.5] | 'telescope' [0.5]
1551
+ VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
1552
+ V -> 'ate' [0.35] | 'saw' [0.65]
1553
+ PP -> P NP [1.0]
1554
+ P -> 'with' [0.61] | 'under' [0.39]
1555
+ """
1556
+ )
1557
+
1558
+ toy_pcfg2 = PCFG.fromstring(
1559
+ """
1560
+ S -> NP VP [1.0]
1561
+ VP -> V NP [.59]
1562
+ VP -> V [.40]
1563
+ VP -> VP PP [.01]
1564
+ NP -> Det N [.41]
1565
+ NP -> Name [.28]
1566
+ NP -> NP PP [.31]
1567
+ PP -> P NP [1.0]
1568
+ V -> 'saw' [.21]
1569
+ V -> 'ate' [.51]
1570
+ V -> 'ran' [.28]
1571
+ N -> 'boy' [.11]
1572
+ N -> 'cookie' [.12]
1573
+ N -> 'table' [.13]
1574
+ N -> 'telescope' [.14]
1575
+ N -> 'hill' [.5]
1576
+ Name -> 'Jack' [.52]
1577
+ Name -> 'Bob' [.48]
1578
+ P -> 'with' [.61]
1579
+ P -> 'under' [.39]
1580
+ Det -> 'the' [.41]
1581
+ Det -> 'a' [.31]
1582
+ Det -> 'my' [.28]
1583
+ """
1584
+ )
1585
+
1586
+ pcfg_prods = toy_pcfg1.productions()
1587
+
1588
+ pcfg_prod = pcfg_prods[2]
1589
+ print("A PCFG production:", repr(pcfg_prod))
1590
+ print(" pcfg_prod.lhs() =>", repr(pcfg_prod.lhs()))
1591
+ print(" pcfg_prod.rhs() =>", repr(pcfg_prod.rhs()))
1592
+ print(" pcfg_prod.prob() =>", repr(pcfg_prod.prob()))
1593
+ print()
1594
+
1595
+ grammar = toy_pcfg2
1596
+ print("A PCFG grammar:", repr(grammar))
1597
+ print(" grammar.start() =>", repr(grammar.start()))
1598
+ print(" grammar.productions() =>", end=" ")
1599
+ # Use .replace(...) to line-wrap the output.
1600
+ print(repr(grammar.productions()).replace(",", ",\n" + " " * 26))
1601
+ print()
1602
+
1603
+ # extract productions from three trees and induce the PCFG
1604
+ print("Induce PCFG grammar from treebank data:")
1605
+
1606
+ productions = []
1607
+ item = treebank._fileids[0]
1608
+ for tree in treebank.parsed_sents(item)[:3]:
1609
+ # perform optional tree transformations, e.g.:
1610
+ tree.collapse_unary(collapsePOS=False)
1611
+ tree.chomsky_normal_form(horzMarkov=2)
1612
+
1613
+ productions += tree.productions()
1614
+
1615
+ S = Nonterminal("S")
1616
+ grammar = induce_pcfg(S, productions)
1617
+ print(grammar)
1618
+ print()
1619
+
1620
+ print("Parse sentence using induced grammar:")
1621
+
1622
+ parser = pchart.InsideChartParser(grammar)
1623
+ parser.trace(3)
1624
+
1625
+ # doesn't work as tokens are different:
1626
+ # sent = treebank.tokenized('wsj_0001.mrg')[0]
1627
+
1628
+ sent = treebank.parsed_sents(item)[0].leaves()
1629
+ print(sent)
1630
+ for parse in parser.parse(sent):
1631
+ print(parse)
1632
+
1633
+
1634
+ def fcfg_demo():
1635
+ import nltk.data
1636
+
1637
+ g = nltk.data.load("grammars/book_grammars/feat0.fcfg")
1638
+ print(g)
1639
+ print()
1640
+
1641
+
1642
+ def dg_demo():
1643
+ """
1644
+ A demonstration showing the creation and inspection of a
1645
+ ``DependencyGrammar``.
1646
+ """
1647
+ grammar = DependencyGrammar.fromstring(
1648
+ """
1649
+ 'scratch' -> 'cats' | 'walls'
1650
+ 'walls' -> 'the'
1651
+ 'cats' -> 'the'
1652
+ """
1653
+ )
1654
+ print(grammar)
1655
+
1656
+
1657
+ def sdg_demo():
1658
+ """
1659
+ A demonstration of how to read a string representation of
1660
+ a CoNLL format dependency tree.
1661
+ """
1662
+ from nltk.parse import DependencyGraph
1663
+
1664
+ dg = DependencyGraph(
1665
+ """
1666
+ 1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
1667
+ 2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
1668
+ 3 met met Prep Prep voor 8 mod _ _
1669
+ 4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
1670
+ 5 moeder moeder N N soort|ev|neut 3 obj1 _ _
1671
+ 6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
1672
+ 7 gaan ga V V hulp|inf 6 vc _ _
1673
+ 8 winkelen winkel V V intrans|inf 11 cnj _ _
1674
+ 9 , , Punc Punc komma 8 punct _ _
1675
+ 10 zwemmen zwem V V intrans|inf 11 cnj _ _
1676
+ 11 of of Conj Conj neven 7 vc _ _
1677
+ 12 terrassen terras N N soort|mv|neut 11 cnj _ _
1678
+ 13 . . Punc Punc punt 12 punct _ _
1679
+ """
1680
+ )
1681
+ tree = dg.tree()
1682
+ print(tree.pprint())
1683
+
1684
+
1685
+ def demo():
1686
+ cfg_demo()
1687
+ pcfg_demo()
1688
+ fcfg_demo()
1689
+ dg_demo()
1690
+ sdg_demo()
1691
+
1692
+
1693
+ if __name__ == "__main__":
1694
+ demo()
1695
+
1696
+ __all__ = [
1697
+ "Nonterminal",
1698
+ "nonterminals",
1699
+ "CFG",
1700
+ "Production",
1701
+ "PCFG",
1702
+ "ProbabilisticProduction",
1703
+ "DependencyGrammar",
1704
+ "DependencyProduction",
1705
+ "ProbabilisticDependencyGrammar",
1706
+ "induce_pcfg",
1707
+ "read_grammar",
1708
+ ]
llmeval-env/lib/python3.10/site-packages/nltk/help.py ADDED
@@ -0,0 +1,64 @@
1
+ # Natural Language Toolkit (NLTK) Help
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Authors: Steven Bird <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Provide structured access to documentation.
10
+ """
11
+
12
+ import re
13
+ from textwrap import wrap
14
+
15
+ from nltk.data import load
16
+
17
+
18
+ def brown_tagset(tagpattern=None):
19
+ _format_tagset("brown_tagset", tagpattern)
20
+
21
+
22
+ def claws5_tagset(tagpattern=None):
23
+ _format_tagset("claws5_tagset", tagpattern)
24
+
25
+
26
+ def upenn_tagset(tagpattern=None):
27
+ _format_tagset("upenn_tagset", tagpattern)
28
+
29
+
30
+ #####################################################################
31
+ # UTILITIES
32
+ #####################################################################
33
+
34
+
35
+ def _print_entries(tags, tagdict):
36
+ for tag in tags:
37
+ entry = tagdict[tag]
38
+ defn = [tag + ": " + entry[0]]
39
+ examples = wrap(
40
+ entry[1], width=75, initial_indent=" ", subsequent_indent=" "
41
+ )
42
+ print("\n".join(defn + examples))
43
+
44
+
45
+ def _format_tagset(tagset, tagpattern=None):
46
+ tagdict = load("help/tagsets/" + tagset + ".pickle")
47
+ if not tagpattern:
48
+ _print_entries(sorted(tagdict), tagdict)
49
+ elif tagpattern in tagdict:
50
+ _print_entries([tagpattern], tagdict)
51
+ else:
52
+ tagpattern = re.compile(tagpattern)
53
+ tags = [tag for tag in sorted(tagdict) if tagpattern.match(tag)]
54
+ if tags:
55
+ _print_entries(tags, tagdict)
56
+ else:
57
+ print("No matching tags found.")
58
+
59
+
60
+ if __name__ == "__main__":
61
+ brown_tagset(r"NN.*")
62
+ upenn_tagset(r".*\$")
63
+ claws5_tagset("UNDEFINED")
64
+ brown_tagset(r"NN")
llmeval-env/lib/python3.10/site-packages/nltk/internals.py ADDED
@@ -0,0 +1,1123 @@
1
+ # Natural Language Toolkit: Internal utility functions
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # Nitin Madnani <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ import fnmatch
11
+ import locale
12
+ import os
13
+ import re
14
+ import stat
15
+ import subprocess
16
+ import sys
17
+ import textwrap
18
+ import types
19
+ import warnings
20
+ from xml.etree import ElementTree
21
+
22
+ ##########################################################################
23
+ # Java Via Command-Line
24
+ ##########################################################################
25
+
26
+ _java_bin = None
27
+ _java_options = []
28
+ # [xx] add classpath option to config_java?
29
+ def config_java(bin=None, options=None, verbose=False):
30
+ """
31
+ Configure nltk's java interface, by letting nltk know where it can
32
+ find the Java binary, and what extra options (if any) should be
33
+ passed to Java when it is run.
34
+
35
+ :param bin: The full path to the Java binary. If not specified,
36
+ then nltk will search the system for a Java binary; and if
37
+ one is not found, it will raise a ``LookupError`` exception.
38
+ :type bin: str
39
+ :param options: A list of options that should be passed to the
40
+ Java binary when it is called. A common value is
41
+ ``'-Xmx512m'``, which tells Java binary to increase
42
+ the maximum heap size to 512 megabytes. If no options are
43
+ specified, then do not modify the options list.
44
+ :type options: list(str)
45
+ """
46
+ global _java_bin, _java_options
47
+ _java_bin = find_binary(
48
+ "java",
49
+ bin,
50
+ env_vars=["JAVAHOME", "JAVA_HOME"],
51
+ verbose=verbose,
52
+ binary_names=["java.exe"],
53
+ )
54
+
55
+ if options is not None:
56
+ if isinstance(options, str):
57
+ options = options.split()
58
+ _java_options = list(options)
59
+
60
+
61
+ def java(cmd, classpath=None, stdin=None, stdout=None, stderr=None, blocking=True):
62
+ """
63
+ Execute the given java command, by opening a subprocess that calls
64
+ Java. If java has not yet been configured, it will be configured
65
+ by calling ``config_java()`` with no arguments.
66
+
67
+ :param cmd: The java command that should be called, formatted as
68
+ a list of strings. Typically, the first string will be the name
69
+ of the java class; and the remaining strings will be arguments
70
+ for that java class.
71
+ :type cmd: list(str)
72
+
73
+ :param classpath: A ``':'`` separated list of directories, JAR
74
+ archives, and ZIP archives to search for class files.
75
+ :type classpath: str
76
+
77
+ :param stdin: Specify the executed program's
78
+ standard input file handles, respectively. Valid values are ``subprocess.PIPE``,
79
+ an existing file descriptor (a positive integer), an existing
80
+ file object, 'pipe', 'stdout', 'devnull' and None. ``subprocess.PIPE`` indicates that a
81
+ new pipe to the child should be created. With None, no
82
+ redirection will occur; the child's file handles will be
83
+ inherited from the parent. Additionally, stderr can be
84
+ ``subprocess.STDOUT``, which indicates that the stderr data
85
+ from the applications should be captured into the same file
86
+ handle as for stdout.
87
+
88
+ :param stdout: Specify the executed program's standard output file
89
+ handle. See ``stdin`` for valid values.
90
+
91
+ :param stderr: Specify the executed program's standard error file
92
+ handle. See ``stdin`` for valid values.
93
+
94
+
95
+ :param blocking: If ``false``, then return immediately after
96
+ spawning the subprocess. In this case, the return value is
97
+ the ``Popen`` object, and not a ``(stdout, stderr)`` tuple.
98
+
99
+ :return: If ``blocking=True``, then return a tuple ``(stdout,
100
+ stderr)``, containing the stdout and stderr outputs generated
101
+ by the java command if the ``stdout`` and ``stderr`` parameters
102
+ were set to ``subprocess.PIPE``; or None otherwise. If
103
+ ``blocking=False``, then return a ``subprocess.Popen`` object.
104
+
105
+ :raise OSError: If the java command returns a nonzero return code.
106
+ """
107
+
108
+ subprocess_output_dict = {
109
+ "pipe": subprocess.PIPE,
110
+ "stdout": subprocess.STDOUT,
111
+ "devnull": subprocess.DEVNULL,
112
+ }
113
+
114
+ stdin = subprocess_output_dict.get(stdin, stdin)
115
+ stdout = subprocess_output_dict.get(stdout, stdout)
116
+ stderr = subprocess_output_dict.get(stderr, stderr)
117
+
118
+ if isinstance(cmd, str):
119
+ raise TypeError("cmd should be a list of strings")
120
+
121
+ # Make sure we know where a java binary is.
122
+ if _java_bin is None:
123
+ config_java()
124
+
125
+ # Set up the classpath.
126
+ if isinstance(classpath, str):
127
+ classpaths = [classpath]
128
+ else:
129
+ classpaths = list(classpath)
130
+ classpath = os.path.pathsep.join(classpaths)
131
+
132
+ # Construct the full command string.
133
+ cmd = list(cmd)
134
+ cmd = ["-cp", classpath] + cmd
135
+ cmd = [_java_bin] + _java_options + cmd
136
+
137
+ # Call java via a subprocess
138
+ p = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr)
139
+ if not blocking:
140
+ return p
141
+ (stdout, stderr) = p.communicate()
142
+
143
+ # Check the return code.
144
+ if p.returncode != 0:
145
+ print(_decode_stdoutdata(stderr))
146
+ raise OSError("Java command failed : " + str(cmd))
147
+
148
+ return (stdout, stderr)
149
+
150
+
151
+ ######################################################################
152
+ # Parsing
153
+ ######################################################################
154
+
155
+
156
+ class ReadError(ValueError):
157
+ """
158
+ Exception raised by read_* functions when they fail.
159
+ :param position: The index in the input string where an error occurred.
160
+ :param expected: What was expected when an error occurred.
161
+ """
162
+
163
+ def __init__(self, expected, position):
164
+ ValueError.__init__(self, expected, position)
165
+ self.expected = expected
166
+ self.position = position
167
+
168
+ def __str__(self):
169
+ return f"Expected {self.expected} at {self.position}"
170
+
171
+
172
+ _STRING_START_RE = re.compile(r"[uU]?[rR]?(\"\"\"|\'\'\'|\"|\')")
173
+
174
+
175
+ def read_str(s, start_position):
176
+ """
177
+ If a Python string literal begins at the specified position in the
178
+ given string, then return a tuple ``(val, end_position)``
179
+ containing the value of the string literal and the position where
180
+ it ends. Otherwise, raise a ``ReadError``.
181
+
182
+ :param s: A string that will be checked to see if within which a
183
+ Python string literal exists.
184
+ :type s: str
185
+
186
+ :param start_position: The specified beginning position of the string ``s``
187
+ to begin regex matching.
188
+ :type start_position: int
189
+
190
+ :return: A tuple containing the matched string literal evaluated as a
191
+ string and the end position of the string literal.
192
+ :rtype: tuple(str, int)
193
+
194
+ :raise ReadError: If the ``_STRING_START_RE`` regex doesn't return a
195
+ match in ``s`` at ``start_position``, i.e., open quote. If the
196
+ ``_STRING_END_RE`` regex doesn't return a match in ``s`` at the
197
+ end of the first match, i.e., close quote.
198
+ :raise ValueError: If an invalid string (i.e., contains an invalid
199
+ escape sequence) is passed into the ``eval``.
200
+
201
+ :Example:
202
+
203
+ >>> from nltk.internals import read_str
204
+ >>> read_str('"Hello", World!', 0)
205
+ ('Hello', 7)
206
+
207
+ """
208
+ # Read the open quote, and any modifiers.
209
+ m = _STRING_START_RE.match(s, start_position)
210
+ if not m:
211
+ raise ReadError("open quote", start_position)
212
+ quotemark = m.group(1)
213
+
214
+ # Find the close quote.
215
+ _STRING_END_RE = re.compile(r"\\|%s" % quotemark)
216
+ position = m.end()
217
+ while True:
218
+ match = _STRING_END_RE.search(s, position)
219
+ if not match:
220
+ raise ReadError("close quote", position)
221
+ if match.group(0) == "\\":
222
+ position = match.end() + 1
223
+ else:
224
+ break
225
+
226
+ # Process it, using eval. Strings with invalid escape sequences
227
+ # might raise ValueError.
228
+ try:
229
+ return eval(s[start_position : match.end()]), match.end()
230
+ except ValueError as e:
231
+ raise ReadError("valid escape sequence", start_position) from e
232
+
233
+
234
+ _READ_INT_RE = re.compile(r"-?\d+")
235
+
236
+
237
+ def read_int(s, start_position):
238
+ """
239
+ If an integer begins at the specified position in the given
240
+ string, then return a tuple ``(val, end_position)`` containing the
241
+ value of the integer and the position where it ends. Otherwise,
242
+ raise a ``ReadError``.
243
+
244
+ :param s: A string that will be checked to see if within which a
245
+ Python integer exists.
246
+ :type s: str
247
+
248
+ :param start_position: The specified beginning position of the string ``s``
249
+ to begin regex matching.
250
+ :type start_position: int
251
+
252
+ :return: A tuple containing the matched integer casted to an int,
253
+ and the end position of the int in ``s``.
254
+ :rtype: tuple(int, int)
255
+
256
+ :raise ReadError: If the ``_READ_INT_RE`` regex doesn't return a
257
+ match in ``s`` at ``start_position``.
258
+
259
+ :Example:
260
+
261
+ >>> from nltk.internals import read_int
262
+ >>> read_int('42 is the answer', 0)
263
+ (42, 2)
264
+
265
+ """
266
+ m = _READ_INT_RE.match(s, start_position)
267
+ if not m:
268
+ raise ReadError("integer", start_position)
269
+ return int(m.group()), m.end()
270
+
271
+
272
+ _READ_NUMBER_VALUE = re.compile(r"-?(\d*)([.]?\d*)?")
273
+
274
+
275
+ def read_number(s, start_position):
276
+ """
277
+ If an integer or float begins at the specified position in the
278
+ given string, then return a tuple ``(val, end_position)``
279
+ containing the value of the number and the position where it ends.
280
+ Otherwise, raise a ``ReadError``.
281
+
282
+ :param s: A string that will be checked to see if within which a
283
+ Python number exists.
284
+ :type s: str
285
+
286
+ :param start_position: The specified beginning position of the string ``s``
287
+ to begin regex matching.
288
+ :type start_position: int
289
+
290
+ :return: A tuple containing the matched number casted to a ``float``,
291
+ and the end position of the number in ``s``.
292
+ :rtype: tuple(float, int)
293
+
294
+ :raise ReadError: If the ``_READ_NUMBER_VALUE`` regex doesn't return a
295
+ match in ``s`` at ``start_position``.
296
+
297
+ :Example:
298
+
299
+ >>> from nltk.internals import read_number
300
+ >>> read_number('Pi is 3.14159', 6)
301
+ (3.14159, 13)
302
+
303
+ """
304
+ m = _READ_NUMBER_VALUE.match(s, start_position)
305
+ if not m or not (m.group(1) or m.group(2)):
306
+ raise ReadError("number", start_position)
307
+ if m.group(2):
308
+ return float(m.group()), m.end()
309
+ else:
310
+ return int(m.group()), m.end()
311
+
312
+
313
+ ######################################################################
314
+ # Check if a method has been overridden
315
+ ######################################################################
316
+
317
+
318
+ def overridden(method):
319
+ """
320
+ :return: True if ``method`` overrides some method with the same
321
+ name in a base class. This is typically used when defining
322
+ abstract base classes or interfaces, to allow subclasses to define
323
+ either of two related methods:
324
+
325
+ >>> class EaterI:
326
+ ... '''Subclass must define eat() or batch_eat().'''
327
+ ... def eat(self, food):
328
+ ... if overridden(self.batch_eat):
329
+ ... return self.batch_eat([food])[0]
330
+ ... else:
331
+ ... raise NotImplementedError()
332
+ ... def batch_eat(self, foods):
333
+ ... return [self.eat(food) for food in foods]
334
+
335
+ :type method: instance method
336
+ """
337
+ if isinstance(method, types.MethodType) and method.__self__.__class__ is not None:
338
+ name = method.__name__
339
+ funcs = [
340
+ cls.__dict__[name]
341
+ for cls in _mro(method.__self__.__class__)
342
+ if name in cls.__dict__
343
+ ]
344
+ return len(funcs) > 1
345
+ else:
346
+ raise TypeError("Expected an instance method.")
347
+
348
+
349
+ def _mro(cls):
350
+ """
351
+ Return the method resolution order for ``cls`` -- i.e., a list
352
+ containing ``cls`` and all its base classes, in the order in which
353
+ they would be checked by ``getattr``. For new-style classes, this
354
+ is just cls.__mro__. For classic classes, this can be obtained by
355
+ a depth-first left-to-right traversal of ``__bases__``.
356
+ """
357
+ if isinstance(cls, type):
358
+ return cls.__mro__
359
+ else:
360
+ mro = [cls]
361
+ for base in cls.__bases__:
362
+ mro.extend(_mro(base))
363
+ return mro
364
+
365
+
366
+ ######################################################################
367
+ # Deprecation decorator & base class
368
+ ######################################################################
369
+ # [xx] dedent msg first if it comes from a docstring.
370
+
371
+
372
+ def _add_epytext_field(obj, field, message):
373
+ """Add an epytext @field to a given object's docstring."""
374
+ indent = ""
375
+ # If we already have a docstring, then add a blank line to separate
376
+ # it from the new field, and check its indentation.
377
+ if obj.__doc__:
378
+ obj.__doc__ = obj.__doc__.rstrip() + "\n\n"
379
+ indents = re.findall(r"(?<=\n)[ ]+(?!\s)", obj.__doc__.expandtabs())
380
+ if indents:
381
+ indent = min(indents)
382
+ # If we don't have a docstring, add an empty one.
383
+ else:
384
+ obj.__doc__ = ""
385
+
386
+ obj.__doc__ += textwrap.fill(
387
+ f"@{field}: {message}",
388
+ initial_indent=indent,
389
+ subsequent_indent=indent + " ",
390
+ )
391
+
392
+
393
+ def deprecated(message):
394
+ """
395
+ A decorator used to mark functions as deprecated. This will cause
396
+ a warning to be printed the when the function is used. Usage:
397
+
398
+ >>> from nltk.internals import deprecated
399
+ >>> @deprecated('Use foo() instead')
400
+ ... def bar(x):
401
+ ... print(x/10)
402
+
403
+ """
404
+
405
+ def decorator(func):
406
+ msg = f"Function {func.__name__}() has been deprecated. {message}"
407
+ msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ")
408
+
409
+ def newFunc(*args, **kwargs):
410
+ warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
411
+ return func(*args, **kwargs)
412
+
413
+ # Copy the old function's name, docstring, & dict
414
+ newFunc.__dict__.update(func.__dict__)
415
+ newFunc.__name__ = func.__name__
416
+ newFunc.__doc__ = func.__doc__
417
+ newFunc.__deprecated__ = True
418
+ # Add a @deprecated field to the docstring.
419
+ _add_epytext_field(newFunc, "deprecated", message)
420
+ return newFunc
421
+
422
+ return decorator
423
+
424
+
425
+ class Deprecated:
426
+ """
427
+ A base class used to mark deprecated classes. A typical usage is to
428
+ alert users that the name of a class has changed:
429
+
430
+ >>> from nltk.internals import Deprecated
431
+ >>> class NewClassName:
432
+ ... pass # All logic goes here.
433
+ ...
434
+ >>> class OldClassName(Deprecated, NewClassName):
435
+ ... "Use NewClassName instead."
436
+
437
+ The docstring of the deprecated class will be used in the
438
+ deprecation warning message.
439
+ """
440
+
441
+ def __new__(cls, *args, **kwargs):
442
+ # Figure out which class is the deprecated one.
443
+ dep_cls = None
444
+ for base in _mro(cls):
445
+ if Deprecated in base.__bases__:
446
+ dep_cls = base
447
+ break
448
+ assert dep_cls, "Unable to determine which base is deprecated."
449
+
450
+ # Construct an appropriate warning.
451
+ doc = dep_cls.__doc__ or "".strip()
452
+ # If there's a @deprecated field, strip off the field marker.
453
+ doc = re.sub(r"\A\s*@deprecated:", r"", doc)
454
+ # Strip off any indentation.
455
+ doc = re.sub(r"(?m)^\s*", "", doc)
456
+ # Construct a 'name' string.
457
+ name = "Class %s" % dep_cls.__name__
458
+ if cls != dep_cls:
459
+ name += " (base class for %s)" % cls.__name__
460
+ # Put it all together.
461
+ msg = f"{name} has been deprecated. {doc}"
462
+ # Wrap it.
463
+ msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ")
464
+ warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
465
+ # Do the actual work of __new__.
466
+ return object.__new__(cls)
467
+
468
+
469
+ ##########################################################################
470
+ # COUNTER, FOR UNIQUE NAMING
471
+ ##########################################################################
472
+
473
+
474
+ class Counter:
475
+ """
476
+ A counter that auto-increments each time its value is read.
477
+ """
478
+
479
+ def __init__(self, initial_value=0):
480
+ self._value = initial_value
481
+
482
+ def get(self):
483
+ self._value += 1
484
+ return self._value
485
+
486
+
487
+ ##########################################################################
488
+ # Search for files/binaries
489
+ ##########################################################################
490
+
491
+
492
+ def find_file_iter(
493
+ filename,
494
+ env_vars=(),
495
+ searchpath=(),
496
+ file_names=None,
497
+ url=None,
498
+ verbose=False,
499
+ finding_dir=False,
500
+ ):
501
+ """
502
+ Search for a file to be used by nltk.
503
+
504
+ :param filename: The name or path of the file.
505
+ :param env_vars: A list of environment variable names to check.
506
+ :param file_names: A list of alternative file names to check.
507
+ :param searchpath: List of directories to search.
508
+ :param url: URL presented to user for download help.
509
+ :param verbose: Whether or not to print path when a file is found.
510
+ """
511
+ file_names = [filename] + (file_names or [])
512
+ assert isinstance(filename, str)
513
+ assert not isinstance(file_names, str)
514
+ assert not isinstance(searchpath, str)
515
+ if isinstance(env_vars, str):
516
+ env_vars = env_vars.split()
517
+ yielded = False
518
+
519
+ # File exists, no magic
520
+ for alternative in file_names:
521
+ path_to_file = os.path.join(filename, alternative)
522
+ if os.path.isfile(path_to_file):
523
+ if verbose:
524
+ print(f"[Found {filename}: {path_to_file}]")
525
+ yielded = True
526
+ yield path_to_file
527
+ # Check the bare alternatives
528
+ if os.path.isfile(alternative):
529
+ if verbose:
530
+ print(f"[Found {filename}: {alternative}]")
531
+ yielded = True
532
+ yield alternative
533
+ # Check if the alternative is inside a 'file' directory
534
+ path_to_file = os.path.join(filename, "file", alternative)
535
+ if os.path.isfile(path_to_file):
536
+ if verbose:
537
+ print(f"[Found {filename}: {path_to_file}]")
538
+ yielded = True
539
+ yield path_to_file
540
+
541
+ # Check environment variables
542
+ for env_var in env_vars:
543
+ if env_var in os.environ:
544
+ if finding_dir: # This is to find a directory instead of a file
545
+ yielded = True
546
+ yield os.environ[env_var]
547
+
548
+ for env_dir in os.environ[env_var].split(os.pathsep):
549
+ # Check if the environment variable contains a direct path to the bin
550
+ if os.path.isfile(env_dir):
551
+ if verbose:
552
+ print(f"[Found {filename}: {env_dir}]")
553
+ yielded = True
554
+ yield env_dir
555
+ # Check if the possible bin names exist inside the environment variable directories
556
+ for alternative in file_names:
557
+ path_to_file = os.path.join(env_dir, alternative)
558
+ if os.path.isfile(path_to_file):
559
+ if verbose:
560
+ print(f"[Found {filename}: {path_to_file}]")
561
+ yielded = True
562
+ yield path_to_file
563
+ # Check if the alternative is inside a 'file' directory
564
+ # path_to_file = os.path.join(env_dir, 'file', alternative)
565
+
566
+ # Check if the alternative is inside a 'bin' directory
567
+ path_to_file = os.path.join(env_dir, "bin", alternative)
568
+
569
+ if os.path.isfile(path_to_file):
570
+ if verbose:
571
+ print(f"[Found {filename}: {path_to_file}]")
572
+ yielded = True
573
+ yield path_to_file
574
+
575
+ # Check the path list.
576
+ for directory in searchpath:
577
+ for alternative in file_names:
578
+ path_to_file = os.path.join(directory, alternative)
579
+ if os.path.isfile(path_to_file):
580
+ yielded = True
581
+ yield path_to_file
582
+
583
+ # If we're on a POSIX system, then try using the 'which' command
584
+ # to find the file.
585
+ if os.name == "posix":
586
+ for alternative in file_names:
587
+ try:
588
+ p = subprocess.Popen(
589
+ ["which", alternative],
590
+ stdout=subprocess.PIPE,
591
+ stderr=subprocess.PIPE,
592
+ )
593
+ stdout, stderr = p.communicate()
594
+ path = _decode_stdoutdata(stdout).strip()
595
+ if path.endswith(alternative) and os.path.exists(path):
596
+ if verbose:
597
+ print(f"[Found {filename}: {path}]")
598
+ yielded = True
599
+ yield path
600
+ except (KeyboardInterrupt, SystemExit, OSError):
601
+ raise
602
+ finally:
603
+ pass
604
+
605
+ if not yielded:
606
+ msg = (
607
+ "NLTK was unable to find the %s file!"
608
+ "\nUse software specific "
609
+ "configuration parameters" % filename
610
+ )
611
+ if env_vars:
612
+ msg += " or set the %s environment variable" % env_vars[0]
613
+ msg += "."
614
+ if searchpath:
615
+ msg += "\n\n Searched in:"
616
+ msg += "".join("\n - %s" % d for d in searchpath)
617
+ if url:
618
+ msg += f"\n\n For more information on {filename}, see:\n <{url}>"
619
+ div = "=" * 75
620
+ raise LookupError(f"\n\n{div}\n{msg}\n{div}")
621
+
622
+
623
+ def find_file(
624
+ filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False
625
+ ):
626
+ return next(
627
+ find_file_iter(filename, env_vars, searchpath, file_names, url, verbose)
628
+ )
629
+
630
+
631
+ def find_dir(
632
+ filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False
633
+ ):
634
+ return next(
635
+ find_file_iter(
636
+ filename, env_vars, searchpath, file_names, url, verbose, finding_dir=True
637
+ )
638
+ )
639
+
640
+
641
+ def find_binary_iter(
642
+ name,
643
+ path_to_bin=None,
644
+ env_vars=(),
645
+ searchpath=(),
646
+ binary_names=None,
647
+ url=None,
648
+ verbose=False,
649
+ ):
650
+ """
651
+ Search for a file to be used by nltk.
652
+
653
+ :param name: The name or path of the file.
654
+ :param path_to_bin: The user-supplied binary location (deprecated)
655
+ :param env_vars: A list of environment variable names to check.
656
+ :param file_names: A list of alternative file names to check.
657
+ :param searchpath: List of directories to search.
658
+ :param url: URL presented to user for download help.
659
+ :param verbose: Whether or not to print path when a file is found.
660
+ """
661
+ yield from find_file_iter(
662
+ path_to_bin or name, env_vars, searchpath, binary_names, url, verbose
663
+ )
664
+
665
+
666
+ def find_binary(
667
+ name,
668
+ path_to_bin=None,
669
+ env_vars=(),
670
+ searchpath=(),
671
+ binary_names=None,
672
+ url=None,
673
+ verbose=False,
674
+ ):
675
+ return next(
676
+ find_binary_iter(
677
+ name, path_to_bin, env_vars, searchpath, binary_names, url, verbose
678
+ )
679
+ )
680
+
681
+
682
+ def find_jar_iter(
683
+ name_pattern,
684
+ path_to_jar=None,
685
+ env_vars=(),
686
+ searchpath=(),
687
+ url=None,
688
+ verbose=False,
689
+ is_regex=False,
690
+ ):
691
+ """
692
+ Search for a jar that is used by nltk.
693
+
694
+ :param name_pattern: The name of the jar file
695
+ :param path_to_jar: The user-supplied jar location, or None.
696
+ :param env_vars: A list of environment variable names to check
697
+ in addition to the CLASSPATH variable which is
698
+ checked by default.
699
+ :param searchpath: List of directories to search.
700
+ :param is_regex: Whether name is a regular expression.
701
+ """
702
+
703
+ assert isinstance(name_pattern, str)
704
+ assert not isinstance(searchpath, str)
705
+ if isinstance(env_vars, str):
706
+ env_vars = env_vars.split()
707
+ yielded = False
708
+
709
+ # Make sure we check the CLASSPATH first
710
+ env_vars = ["CLASSPATH"] + list(env_vars)
711
+
712
+ # If an explicit location was given, then check it, and yield it if
713
+ # it's present; otherwise, complain.
714
+ if path_to_jar is not None:
715
+ if os.path.isfile(path_to_jar):
716
+ yielded = True
717
+ yield path_to_jar
718
+ else:
719
+ raise LookupError(
720
+ f"Could not find {name_pattern} jar file at {path_to_jar}"
721
+ )
722
+
723
+ # Check environment variables
724
+ for env_var in env_vars:
725
+ if env_var in os.environ:
726
+ if env_var == "CLASSPATH":
727
+ classpath = os.environ["CLASSPATH"]
728
+ for cp in classpath.split(os.path.pathsep):
729
+ cp = os.path.expanduser(cp)
730
+ if os.path.isfile(cp):
731
+ filename = os.path.basename(cp)
732
+ if (
733
+ is_regex
734
+ and re.match(name_pattern, filename)
735
+ or (not is_regex and filename == name_pattern)
736
+ ):
737
+ if verbose:
738
+ print(f"[Found {name_pattern}: {cp}]")
739
+ yielded = True
740
+ yield cp
741
+ # The case where user put directory containing the jar file in the classpath
742
+ if os.path.isdir(cp):
743
+ if not is_regex:
744
+ if os.path.isfile(os.path.join(cp, name_pattern)):
745
+ if verbose:
746
+ print(f"[Found {name_pattern}: {cp}]")
747
+ yielded = True
748
+ yield os.path.join(cp, name_pattern)
749
+ else:
750
+ # Look for file using regular expression
751
+ for file_name in os.listdir(cp):
752
+ if re.match(name_pattern, file_name):
753
+ if verbose:
754
+ print(
755
+ "[Found %s: %s]"
756
+ % (
757
+ name_pattern,
758
+ os.path.join(cp, file_name),
759
+ )
760
+ )
761
+ yielded = True
762
+ yield os.path.join(cp, file_name)
763
+
764
+ else:
765
+ jar_env = os.path.expanduser(os.environ[env_var])
766
+ jar_iter = (
767
+ (
768
+ os.path.join(jar_env, path_to_jar)
769
+ for path_to_jar in os.listdir(jar_env)
770
+ )
771
+ if os.path.isdir(jar_env)
772
+ else (jar_env,)
773
+ )
774
+ for path_to_jar in jar_iter:
775
+ if os.path.isfile(path_to_jar):
776
+ filename = os.path.basename(path_to_jar)
777
+ if (
778
+ is_regex
779
+ and re.match(name_pattern, filename)
780
+ or (not is_regex and filename == name_pattern)
781
+ ):
782
+ if verbose:
783
+ print(f"[Found {name_pattern}: {path_to_jar}]")
784
+ yielded = True
785
+ yield path_to_jar
786
+
787
+ # Check the path list.
788
+ for directory in searchpath:
789
+ if is_regex:
790
+ for filename in os.listdir(directory):
791
+ path_to_jar = os.path.join(directory, filename)
792
+ if os.path.isfile(path_to_jar):
793
+ if re.match(name_pattern, filename):
794
+ if verbose:
795
+ print(f"[Found {filename}: {path_to_jar}]")
796
+ yielded = True
797
+ yield path_to_jar
798
+ else:
799
+ path_to_jar = os.path.join(directory, name_pattern)
800
+ if os.path.isfile(path_to_jar):
801
+ if verbose:
802
+ print(f"[Found {name_pattern}: {path_to_jar}]")
803
+ yielded = True
804
+ yield path_to_jar
805
+
806
+ if not yielded:
807
+ # If nothing was found, raise an error
808
+ msg = "NLTK was unable to find %s!" % name_pattern
809
+ if env_vars:
810
+ msg += " Set the %s environment variable" % env_vars[0]
811
+ msg = textwrap.fill(msg + ".", initial_indent=" ", subsequent_indent=" ")
812
+ if searchpath:
813
+ msg += "\n\n Searched in:"
814
+ msg += "".join("\n - %s" % d for d in searchpath)
815
+ if url:
816
+ msg += "\n\n For more information, on {}, see:\n <{}>".format(
817
+ name_pattern,
818
+ url,
819
+ )
820
+ div = "=" * 75
821
+ raise LookupError(f"\n\n{div}\n{msg}\n{div}")
822
+
823
+
824
+ def find_jar(
825
+ name_pattern,
826
+ path_to_jar=None,
827
+ env_vars=(),
828
+ searchpath=(),
829
+ url=None,
830
+ verbose=False,
831
+ is_regex=False,
832
+ ):
833
+ return next(
834
+ find_jar_iter(
835
+ name_pattern, path_to_jar, env_vars, searchpath, url, verbose, is_regex
836
+ )
837
+ )
838
+
839
+
840
+ def find_jars_within_path(path_to_jars):
841
+ return [
842
+ os.path.join(root, filename)
843
+ for root, dirnames, filenames in os.walk(path_to_jars)
844
+ for filename in fnmatch.filter(filenames, "*.jar")
845
+ ]
846
+
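For orientation, a hedged usage sketch of the jar helpers above; the jar name, environment variable and search directory are illustrative placeholders, not values guaranteed to exist on any machine:

    from nltk.internals import find_jar, find_jars_within_path

    try:
        jar = find_jar(
            "stanford-parser.jar",          # exact file name to look for
            env_vars=("STANFORD_PARSER",),  # checked after CLASSPATH
            searchpath=("/opt/jars",),      # extra directories to scan
            verbose=True,
        )
        print("using", jar)
    except LookupError as err:
        print(err)                          # message lists where NLTK searched

    # Collect every *.jar below a directory, e.g. to build a classpath:
    print(find_jars_within_path("/opt/jars"))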
847
+
848
+ def _decode_stdoutdata(stdoutdata):
849
+ """Convert data read from stdout/stderr to unicode"""
850
+ if not isinstance(stdoutdata, bytes):
851
+ return stdoutdata
852
+
853
+ encoding = getattr(sys.__stdout__, "encoding", locale.getpreferredencoding())
854
+ if encoding is None:
855
+ return stdoutdata.decode()
856
+ return stdoutdata.decode(encoding)
857
+
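A small illustration of what _decode_stdoutdata is for: normalizing subprocess output, which may arrive as bytes, into str using the console encoding when one is available (the echo call assumes a POSIX-like environment):

    import subprocess
    from nltk.internals import _decode_stdoutdata

    raw = subprocess.run(["echo", "hello"], capture_output=True).stdout  # bytes
    print(repr(_decode_stdoutdata(raw)))        # 'hello\n' decoded to str
    print(_decode_stdoutdata("already text"))   # str input passes through unchanged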
858
+
859
+ ##########################################################################
860
+ # Import Stdlib Module
861
+ ##########################################################################
862
+
863
+
864
+ def import_from_stdlib(module):
865
+ """
866
+ When python is run from within the nltk/ directory tree, the
867
+ current directory is included at the beginning of the search path.
868
+ Unfortunately, that means that modules within nltk can sometimes
869
+ shadow standard library modules. As an example, the stdlib
870
+ 'inspect' module will attempt to import the stdlib 'tokenize'
871
+ module, but will instead end up importing NLTK's 'tokenize' module
872
+ (causing the import to fail).
873
+ """
874
+ old_path = sys.path
875
+ sys.path = [d for d in sys.path if d not in ("", ".")]
876
+ m = __import__(module)
877
+ sys.path = old_path
878
+ return m
879
+
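For example, this is how one can reach the standard-library tokenize module even when running from inside the NLTK source tree, where nltk/tokenize/ would otherwise shadow it:

    from nltk.internals import import_from_stdlib

    std_tokenize = import_from_stdlib("tokenize")
    print(std_tokenize.__file__)   # points at the stdlib module, not nltk/tokenize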
880
+
881
+ ##########################################################################
882
+ # Wrapper for ElementTree Elements
883
+ ##########################################################################
884
+
885
+
886
+ class ElementWrapper:
887
+ """
888
+ A wrapper around ElementTree Element objects whose main purpose is
889
+ to provide nicer __repr__ and __str__ methods. In addition, any
890
+ of the wrapped Element's methods that return other Element objects
891
+ are overridden to wrap those values before returning them.
892
+
893
+ This makes Elements more convenient to work with in
894
+ interactive sessions and doctests, at the expense of some
895
+ efficiency.
896
+ """
897
+
898
+ # Prevent double-wrapping:
899
+ def __new__(cls, etree):
900
+ """
901
+ Create and return a wrapper around a given Element object.
902
+ If ``etree`` is an ``ElementWrapper``, then ``etree`` is
903
+ returned as-is.
904
+ """
905
+ if isinstance(etree, ElementWrapper):
906
+ return etree
907
+ else:
908
+ return object.__new__(ElementWrapper)
909
+
910
+ def __init__(self, etree):
911
+ r"""
912
+ Initialize a new Element wrapper for ``etree``.
913
+
914
+ If ``etree`` is a string, then it will be converted to an
915
+ Element object using ``ElementTree.fromstring()`` first:
916
+
917
+ >>> ElementWrapper("<test></test>")
918
+ <Element "<?xml version='1.0' encoding='utf8'?>\n<test />">
919
+
920
+ """
921
+ if isinstance(etree, str):
922
+ etree = ElementTree.fromstring(etree)
923
+ self.__dict__["_etree"] = etree
924
+
925
+ def unwrap(self):
926
+ """
927
+ Return the Element object wrapped by this wrapper.
928
+ """
929
+ return self._etree
930
+
931
+ ##////////////////////////////////////////////////////////////
932
+ # { String Representation
933
+ ##////////////////////////////////////////////////////////////
934
+
935
+ def __repr__(self):
936
+ s = ElementTree.tostring(self._etree, encoding="utf8").decode("utf8")
937
+ if len(s) > 60:
938
+ e = s.rfind("<")
939
+ if (len(s) - e) > 30:
940
+ e = -20
941
+ s = f"{s[:30]}...{s[e:]}"
942
+ return "<Element %r>" % s
943
+
944
+ def __str__(self):
945
+ """
946
+ :return: the result of applying ``ElementTree.tostring()`` to
947
+ the wrapped Element object.
948
+ """
949
+ return (
950
+ ElementTree.tostring(self._etree, encoding="utf8").decode("utf8").rstrip()
951
+ )
952
+
953
+ ##////////////////////////////////////////////////////////////
954
+ # { Element interface Delegation (pass-through)
955
+ ##////////////////////////////////////////////////////////////
956
+
957
+ def __getattr__(self, attrib):
958
+ return getattr(self._etree, attrib)
959
+
960
+ def __setattr__(self, attr, value):
961
+ return setattr(self._etree, attr, value)
962
+
963
+ def __delattr__(self, attr):
964
+ return delattr(self._etree, attr)
965
+
966
+ def __setitem__(self, index, element):
967
+ self._etree[index] = element
968
+
969
+ def __delitem__(self, index):
970
+ del self._etree[index]
971
+
972
+ def __setslice__(self, start, stop, elements):
973
+ self._etree[start:stop] = elements
974
+
975
+ def __delslice__(self, start, stop):
976
+ del self._etree[start:stop]
977
+
978
+ def __len__(self):
979
+ return len(self._etree)
980
+
981
+ ##////////////////////////////////////////////////////////////
982
+ # { Element interface Delegation (wrap result)
983
+ ##////////////////////////////////////////////////////////////
984
+
985
+ def __getitem__(self, index):
986
+ return ElementWrapper(self._etree[index])
987
+
988
+ def __getslice__(self, start, stop):
989
+ return [ElementWrapper(elt) for elt in self._etree[start:stop]]
990
+
991
+ def getchildren(self):
992
+ return [ElementWrapper(elt) for elt in self._etree]
993
+
994
+ def getiterator(self, tag=None):
995
+ return (ElementWrapper(elt) for elt in self._etree.getiterator(tag))
996
+
997
+ def makeelement(self, tag, attrib):
998
+ return ElementWrapper(self._etree.makeelement(tag, attrib))
999
+
1000
+ def find(self, path):
1001
+ elt = self._etree.find(path)
1002
+ if elt is None:
1003
+ return elt
1004
+ else:
1005
+ return ElementWrapper(elt)
1006
+
1007
+ def findall(self, path):
1008
+ return [ElementWrapper(elt) for elt in self._etree.findall(path)]
1009
+
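A short interactive-style sketch of the wrapper: find() and findall() results come back wrapped, unknown attributes delegate to the underlying Element, and printing gives the serialized XML:

    from nltk.internals import ElementWrapper

    doc = ElementWrapper("<doc><w pos='NN'>cat</w><w pos='VB'>sat</w></doc>")
    first = doc.find("w")            # an ElementWrapper, not a bare Element
    print(first.get("pos"))          # 'NN' (delegated to the wrapped Element)
    print(len(doc.findall("w")))     # 2
    print(doc)                       # XML serialized via ElementTree.tostring()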
1010
+
1011
+ ######################################################################
1012
+ # Helper for Handling Slicing
1013
+ ######################################################################
1014
+
1015
+
1016
+ def slice_bounds(sequence, slice_obj, allow_step=False):
1017
+ """
1018
+ Given a slice, return the corresponding (start, stop) bounds,
1019
+ taking into account None indices and negative indices. The
1020
+ following guarantees are made for the returned start and stop values:
1021
+
1022
+ - 0 <= start <= len(sequence)
1023
+ - 0 <= stop <= len(sequence)
1024
+ - start <= stop
1025
+
1026
+ :raise ValueError: If ``slice_obj.step`` is not None.
1027
+ :param allow_step: If true, then the slice object may have a
1028
+ non-None step. If it does, then return a tuple
1029
+ (start, stop, step).
1030
+ """
1031
+ start, stop = (slice_obj.start, slice_obj.stop)
1032
+
1033
+ # If allow_step is true, then include the step in our return
1034
+ # value tuple.
1035
+ if allow_step:
1036
+ step = slice_obj.step
1037
+ if step is None:
1038
+ step = 1
1039
+ # Use a recursive call without allow_step to find the slice
1040
+ # bounds. If step is negative, then the roles of start and
1041
+ # stop (in terms of default values, etc), are swapped.
1042
+ if step < 0:
1043
+ start, stop = slice_bounds(sequence, slice(stop, start))
1044
+ else:
1045
+ start, stop = slice_bounds(sequence, slice(start, stop))
1046
+ return start, stop, step
1047
+
1048
+ # Otherwise, make sure that no non-default step value is used.
1049
+ elif slice_obj.step not in (None, 1):
1050
+ raise ValueError(
1051
+ "slices with steps are not supported by %s" % sequence.__class__.__name__
1052
+ )
1053
+
1054
+ # Supply default offsets.
1055
+ if start is None:
1056
+ start = 0
1057
+ if stop is None:
1058
+ stop = len(sequence)
1059
+
1060
+ # Handle negative indices.
1061
+ if start < 0:
1062
+ start = max(0, len(sequence) + start)
1063
+ if stop < 0:
1064
+ stop = max(0, len(sequence) + stop)
1065
+
1066
+ # Make sure stop doesn't go past the end of the list. Note that
1067
+ # we avoid calculating len(sequence) if possible, because for lazy
1068
+ # sequences, calculating the length of a sequence can be expensive.
1069
+ if stop > 0:
1070
+ try:
1071
+ sequence[stop - 1]
1072
+ except IndexError:
1073
+ stop = len(sequence)
1074
+
1075
+ # Make sure start isn't past stop.
1076
+ start = min(start, stop)
1077
+
1078
+ # That's all folks!
1079
+ return start, stop
1080
+
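A few concrete evaluations of slice_bounds on a ten-element list, showing how None, negative indices and an oversized stop are normalized:

    from nltk.internals import slice_bounds

    seq = list(range(10))
    print(slice_bounds(seq, slice(None, None)))   # (0, 10)
    print(slice_bounds(seq, slice(-3, None)))     # (7, 10)
    print(slice_bounds(seq, slice(2, 100)))       # (2, 10): stop clamped to len
    print(slice_bounds(seq, slice(None, None, -1), allow_step=True))  # (0, 10, -1)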
1081
+
1082
+ ######################################################################
1083
+ # Permission Checking
1084
+ ######################################################################
1085
+
1086
+
1087
+ def is_writable(path):
1088
+ # Ensure that it exists.
1089
+ if not os.path.exists(path):
1090
+ return False
1091
+
1092
+ # If we're on a posix system, check its permissions.
1093
+ if hasattr(os, "getuid"):
1094
+ statdata = os.stat(path)
1095
+ perm = stat.S_IMODE(statdata.st_mode)
1096
+ # is it world-writable?
1097
+ if perm & 0o002:
1098
+ return True
1099
+ # do we own it?
1100
+ elif statdata.st_uid == os.getuid() and (perm & 0o200):
1101
+ return True
1102
+ # are we in a group that can write to it?
1103
+ elif (statdata.st_gid in [os.getgid()] + os.getgroups()) and (perm & 0o020):
1104
+ return True
1105
+ # otherwise, we can't write to it.
1106
+ else:
1107
+ return False
1108
+
1109
+ # Otherwise, we'll assume it's writable.
1110
+ # [xx] should we do other checks on other platforms?
1111
+ return True
1112
+
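Illustration of the permission check: on POSIX it inspects the mode bits, elsewhere it optimistically returns True; the temporary directory below exists only for the example:

    import os, tempfile
    from nltk.internals import is_writable

    d = tempfile.mkdtemp()
    print(is_writable(d))                        # True: we own the fresh directory
    print(is_writable(os.path.join(d, "none")))  # False: the path does not exist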
1113
+
1114
+ ######################################################################
1115
+ # NLTK Error reporting
1116
+ ######################################################################
1117
+
1118
+
1119
+ def raise_unorderable_types(ordering, a, b):
1120
+ raise TypeError(
1121
+ "unorderable types: %s() %s %s()"
1122
+ % (type(a).__name__, ordering, type(b).__name__)
1123
+ )
llmeval-env/lib/python3.10/site-packages/nltk/jsontags.py ADDED
@@ -0,0 +1,65 @@
1
+ # Natural Language Toolkit: JSON Encoder/Decoder Helpers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Xu <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Register JSON tags, so the nltk data loader knows what module and class to look for.
11
+
12
+ NLTK uses simple '!' tags to mark the types of objects, but the fully-qualified
13
+ "tag:nltk.org,2011:" prefix is also accepted in case anyone ends up
14
+ using it.
15
+ """
16
+
17
+ import json
18
+
19
+ json_tags = {}
20
+
21
+ TAG_PREFIX = "!"
22
+
23
+
24
+ def register_tag(cls):
25
+ """
26
+ Decorates a class to register its json tag.
27
+ """
28
+ json_tags[TAG_PREFIX + getattr(cls, "json_tag")] = cls
29
+ return cls
30
+
31
+
32
+ class JSONTaggedEncoder(json.JSONEncoder):
33
+ def default(self, obj):
34
+ obj_tag = getattr(obj, "json_tag", None)
35
+ if obj_tag is None:
36
+ return super().default(obj)
37
+ obj_tag = TAG_PREFIX + obj_tag
38
+ obj = obj.encode_json_obj()
39
+ return {obj_tag: obj}
40
+
41
+
42
+ class JSONTaggedDecoder(json.JSONDecoder):
43
+ def decode(self, s):
44
+ return self.decode_obj(super().decode(s))
45
+
46
+ @classmethod
47
+ def decode_obj(cls, obj):
48
+ # Decode nested objects first.
49
+ if isinstance(obj, dict):
50
+ obj = {key: cls.decode_obj(val) for (key, val) in obj.items()}
51
+ elif isinstance(obj, list):
52
+ obj = list(cls.decode_obj(val) for val in obj)
53
+ # Check if we have a tagged object.
54
+ if not isinstance(obj, dict) or len(obj) != 1:
55
+ return obj
56
+ obj_tag = next(iter(obj.keys()))
57
+ if not obj_tag.startswith("!"):
58
+ return obj
59
+ if obj_tag not in json_tags:
60
+ raise ValueError("Unknown tag", obj_tag)
61
+ obj_cls = json_tags[obj_tag]
62
+ return obj_cls.decode_json_obj(obj[obj_tag])
63
+
64
+
65
+ __all__ = ["register_tag", "json_tags", "JSONTaggedEncoder", "JSONTaggedDecoder"]
llmeval-env/lib/python3.10/site-packages/nltk/langnames.py ADDED
@@ -0,0 +1,730 @@
1
+ # Natural Language Toolkit: Language Codes
2
+ #
3
+ # Copyright (C) 2022-2023 NLTK Project
4
+ # Author: Eric Kafe <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ #
8
+ # iso639-3 language codes (C) https://iso639-3.sil.org/
9
+
10
+ """
11
+ Translate between language names and language codes.
12
+
13
+ The iso639-3 language codes were downloaded from the registration authority at
14
+ https://iso639-3.sil.org/
15
+
16
+ The iso639-3 codeset is evolving, so retired language codes are kept in the
17
+ "iso639retired" dictionary, which is used as fallback by the wrapper functions
18
+ "langname" and "langcode", in order to support the lookup of retired codes.
19
+
20
+ The "langcode" function returns the current iso639-3 code if there is one,
21
+ and falls back to the retired code otherwise. As specified by BCP-47,
22
+ it returns the shortest (2-letter) code by default, but 3-letter codes
23
+ are also available:
24
+
25
+ >>> import nltk.langnames as lgn
26
+ >>> lgn.langname('fri') #'fri' is a retired code
27
+ 'Western Frisian'
28
+
29
+ The current code is different from the retired one:
30
+ >>> lgn.langcode('Western Frisian')
31
+ 'fy'
32
+
33
+ >>> lgn.langcode('Western Frisian', typ = 3)
34
+ 'fry'
35
+
36
+ """
37
+
38
+ import re
39
+ from warnings import warn
40
+
41
+ from nltk.corpus import bcp47
42
+
43
+ codepattern = re.compile("[a-z][a-z][a-z]?")
44
+
45
+
46
+ def langname(tag, typ="full"):
47
+ """
48
+ Convert a composite BCP-47 tag to a language name
49
+
50
+ >>> from nltk.langnames import langname
51
+ >>> langname('ca-Latn-ES-valencia')
52
+ 'Catalan: Latin: Spain: Valencian'
53
+
54
+ >>> langname('ca-Latn-ES-valencia', typ="short")
55
+ 'Catalan'
56
+ """
57
+ tags = tag.split("-")
58
+ code = tags[0].lower()
59
+ if codepattern.fullmatch(code):
60
+ if code in iso639retired: # retired codes
61
+ return iso639retired[code]
62
+ elif code in iso639short: # 3-letter codes
63
+ code2 = iso639short[code] # convert to 2-letter code
64
+ warn(f"Shortening {code!r} to {code2!r}", stacklevel=2)
65
+ tag = "-".join([code2] + tags[1:])
66
+ name = bcp47.name(tag) # parse according to BCP-47
67
+ if typ == "full":
68
+ return name # include all subtags
69
+ elif name:
70
+ return name.split(":")[0] # only the language subtag
71
+ else:
72
+ warn(f"Could not find code in {code!r}", stacklevel=2)
73
+
74
+
75
+ def langcode(name, typ=2):
76
+ """
77
+ Convert language name to iso639-3 language code. Returns the short 2-letter
78
+ code by default, if one is available, and the 3-letter code otherwise:
79
+
80
+ >>> from nltk.langnames import langcode
81
+ >>> langcode('Modern Greek (1453-)')
82
+ 'el'
83
+
84
+ Specify 'typ=3' to get the 3-letter code:
85
+
86
+ >>> langcode('Modern Greek (1453-)', typ=3)
87
+ 'ell'
88
+ """
89
+ if name in bcp47.langcode:
90
+ code = bcp47.langcode[name]
91
+ if typ == 3 and code in iso639long:
92
+ code = iso639long[code] # convert to 3-letter code
93
+ return code
94
+ elif name in iso639code_retired:
95
+ return iso639code_retired[name]
96
+ else:
97
+ warn(f"Could not find language in {name!r}", stacklevel=2)
98
+
99
+
100
+ # =======================================================================
101
+ # Translate between Wikidata Q-codes and BCP-47 codes or names
102
+ # .......................................................................
103
+
104
+
105
+ def tag2q(tag):
106
+ """
107
+ Convert BCP-47 tag to Wikidata Q-code
108
+
109
+ >>> tag2q('nds-u-sd-demv')
110
+ 'Q4289225'
111
+ """
112
+ return bcp47.wiki_q[tag]
113
+
114
+
115
+ def q2tag(qcode):
116
+ """
117
+ Convert Wikidata Q-code to BCP-47 tag
118
+
119
+ >>> q2tag('Q4289225')
120
+ 'nds-u-sd-demv'
121
+ """
122
+ return wiki_bcp47[qcode]
123
+
124
+
125
+ def q2name(qcode, typ="full"):
126
+ """
127
+ Convert Wikidata Q-code to BCP-47 (full or short) language name
128
+
129
+ >>> q2name('Q4289225')
130
+ 'Low German: Mecklenburg-Vorpommern'
131
+
132
+ >>> q2name('Q4289225', "short")
133
+ 'Low German'
134
+ """
135
+ return langname(q2tag(qcode), typ)
136
+
137
+
138
+ def lang2q(name):
139
+ """
140
+ Convert simple language name to Wikidata Q-code
141
+
142
+ >>> lang2q('Low German')
143
+ 'Q25433'
144
+ """
145
+ return tag2q(langcode(name))
146
+
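Taken together, these helpers chain between language names, iso639 codes, BCP-47 tags and Wikidata Q-codes. The calls below assume the bcp47 corpus data has been downloaded (nltk.download('bcp47')) and that the plain 'fr' tag has a Wikidata mapping:

    import nltk.langnames as lgn

    print(lgn.langcode("French"))            # 'fr' (2-letter code by default)
    print(lgn.langcode("French", typ=3))     # 'fra'
    print(lgn.langname("fr"))                # 'French'
    print(lgn.q2name(lgn.lang2q("French")))  # round trip through the Q-code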
147
+
148
+ # ======================================================================
149
+ # Data dictionaries
150
+ # ......................................................................
151
+
152
+
153
+ def inverse_dict(dic):
154
+ """Return inverse mapping, but only if it is bijective"""
155
+ if len(dic.keys()) == len(set(dic.values())):
156
+ return {val: key for (key, val) in dic.items()}
157
+ else:
158
+ warn("This dictionary has no bijective inverse mapping.")
159
+
160
+
161
+ bcp47.load_wiki_q() # Wikidata conversion table needs to be loaded explicitly
162
+ wiki_bcp47 = inverse_dict(bcp47.wiki_q)
163
+
164
+ iso639short = {
165
+ "aar": "aa",
166
+ "abk": "ab",
167
+ "afr": "af",
168
+ "aka": "ak",
169
+ "amh": "am",
170
+ "ara": "ar",
171
+ "arg": "an",
172
+ "asm": "as",
173
+ "ava": "av",
174
+ "ave": "ae",
175
+ "aym": "ay",
176
+ "aze": "az",
177
+ "bak": "ba",
178
+ "bam": "bm",
179
+ "bel": "be",
180
+ "ben": "bn",
181
+ "bis": "bi",
182
+ "bod": "bo",
183
+ "bos": "bs",
184
+ "bre": "br",
185
+ "bul": "bg",
186
+ "cat": "ca",
187
+ "ces": "cs",
188
+ "cha": "ch",
189
+ "che": "ce",
190
+ "chu": "cu",
191
+ "chv": "cv",
192
+ "cor": "kw",
193
+ "cos": "co",
194
+ "cre": "cr",
195
+ "cym": "cy",
196
+ "dan": "da",
197
+ "deu": "de",
198
+ "div": "dv",
199
+ "dzo": "dz",
200
+ "ell": "el",
201
+ "eng": "en",
202
+ "epo": "eo",
203
+ "est": "et",
204
+ "eus": "eu",
205
+ "ewe": "ee",
206
+ "fao": "fo",
207
+ "fas": "fa",
208
+ "fij": "fj",
209
+ "fin": "fi",
210
+ "fra": "fr",
211
+ "fry": "fy",
212
+ "ful": "ff",
213
+ "gla": "gd",
214
+ "gle": "ga",
215
+ "glg": "gl",
216
+ "glv": "gv",
217
+ "grn": "gn",
218
+ "guj": "gu",
219
+ "hat": "ht",
220
+ "hau": "ha",
221
+ "hbs": "sh",
222
+ "heb": "he",
223
+ "her": "hz",
224
+ "hin": "hi",
225
+ "hmo": "ho",
226
+ "hrv": "hr",
227
+ "hun": "hu",
228
+ "hye": "hy",
229
+ "ibo": "ig",
230
+ "ido": "io",
231
+ "iii": "ii",
232
+ "iku": "iu",
233
+ "ile": "ie",
234
+ "ina": "ia",
235
+ "ind": "id",
236
+ "ipk": "ik",
237
+ "isl": "is",
238
+ "ita": "it",
239
+ "jav": "jv",
240
+ "jpn": "ja",
241
+ "kal": "kl",
242
+ "kan": "kn",
243
+ "kas": "ks",
244
+ "kat": "ka",
245
+ "kau": "kr",
246
+ "kaz": "kk",
247
+ "khm": "km",
248
+ "kik": "ki",
249
+ "kin": "rw",
250
+ "kir": "ky",
251
+ "kom": "kv",
252
+ "kon": "kg",
253
+ "kor": "ko",
254
+ "kua": "kj",
255
+ "kur": "ku",
256
+ "lao": "lo",
257
+ "lat": "la",
258
+ "lav": "lv",
259
+ "lim": "li",
260
+ "lin": "ln",
261
+ "lit": "lt",
262
+ "ltz": "lb",
263
+ "lub": "lu",
264
+ "lug": "lg",
265
+ "mah": "mh",
266
+ "mal": "ml",
267
+ "mar": "mr",
268
+ "mkd": "mk",
269
+ "mlg": "mg",
270
+ "mlt": "mt",
271
+ "mon": "mn",
272
+ "mri": "mi",
273
+ "msa": "ms",
274
+ "mya": "my",
275
+ "nau": "na",
276
+ "nav": "nv",
277
+ "nbl": "nr",
278
+ "nde": "nd",
279
+ "ndo": "ng",
280
+ "nep": "ne",
281
+ "nld": "nl",
282
+ "nno": "nn",
283
+ "nob": "nb",
284
+ "nor": "no",
285
+ "nya": "ny",
286
+ "oci": "oc",
287
+ "oji": "oj",
288
+ "ori": "or",
289
+ "orm": "om",
290
+ "oss": "os",
291
+ "pan": "pa",
292
+ "pli": "pi",
293
+ "pol": "pl",
294
+ "por": "pt",
295
+ "pus": "ps",
296
+ "que": "qu",
297
+ "roh": "rm",
298
+ "ron": "ro",
299
+ "run": "rn",
300
+ "rus": "ru",
301
+ "sag": "sg",
302
+ "san": "sa",
303
+ "sin": "si",
304
+ "slk": "sk",
305
+ "slv": "sl",
306
+ "sme": "se",
307
+ "smo": "sm",
308
+ "sna": "sn",
309
+ "snd": "sd",
310
+ "som": "so",
311
+ "sot": "st",
312
+ "spa": "es",
313
+ "sqi": "sq",
314
+ "srd": "sc",
315
+ "srp": "sr",
316
+ "ssw": "ss",
317
+ "sun": "su",
318
+ "swa": "sw",
319
+ "swe": "sv",
320
+ "tah": "ty",
321
+ "tam": "ta",
322
+ "tat": "tt",
323
+ "tel": "te",
324
+ "tgk": "tg",
325
+ "tgl": "tl",
326
+ "tha": "th",
327
+ "tir": "ti",
328
+ "ton": "to",
329
+ "tsn": "tn",
330
+ "tso": "ts",
331
+ "tuk": "tk",
332
+ "tur": "tr",
333
+ "twi": "tw",
334
+ "uig": "ug",
335
+ "ukr": "uk",
336
+ "urd": "ur",
337
+ "uzb": "uz",
338
+ "ven": "ve",
339
+ "vie": "vi",
340
+ "vol": "vo",
341
+ "wln": "wa",
342
+ "wol": "wo",
343
+ "xho": "xh",
344
+ "yid": "yi",
345
+ "yor": "yo",
346
+ "zha": "za",
347
+ "zho": "zh",
348
+ "zul": "zu",
349
+ }
350
+
351
+
352
+ iso639retired = {
353
+ "fri": "Western Frisian",
354
+ "auv": "Auvergnat",
355
+ "gsc": "Gascon",
356
+ "lms": "Limousin",
357
+ "lnc": "Languedocien",
358
+ "prv": "Provençal",
359
+ "amd": "Amapá Creole",
360
+ "bgh": "Bogan",
361
+ "bnh": "Banawá",
362
+ "bvs": "Belgian Sign Language",
363
+ "ccy": "Southern Zhuang",
364
+ "cit": "Chittagonian",
365
+ "flm": "Falam Chin",
366
+ "jap": "Jaruára",
367
+ "kob": "Kohoroxitari",
368
+ "mob": "Moinba",
369
+ "mzf": "Aiku",
370
+ "nhj": "Tlalitzlipa Nahuatl",
371
+ "nhs": "Southeastern Puebla Nahuatl",
372
+ "occ": "Occidental",
373
+ "tmx": "Tomyang",
374
+ "tot": "Patla-Chicontla Totonac",
375
+ "xmi": "Miarrã",
376
+ "yib": "Yinglish",
377
+ "ztc": "Lachirioag Zapotec",
378
+ "atf": "Atuence",
379
+ "bqe": "Navarro-Labourdin Basque",
380
+ "bsz": "Souletin Basque",
381
+ "aex": "Amerax",
382
+ "ahe": "Ahe",
383
+ "aiz": "Aari",
384
+ "akn": "Amikoana",
385
+ "arf": "Arafundi",
386
+ "azr": "Adzera",
387
+ "bcx": "Pamona",
388
+ "bii": "Bisu",
389
+ "bke": "Bengkulu",
390
+ "blu": "Hmong Njua",
391
+ "boc": "Bakung Kenyah",
392
+ "bsd": "Sarawak Bisaya",
393
+ "bwv": "Bahau River Kenyah",
394
+ "bxt": "Buxinhua",
395
+ "byu": "Buyang",
396
+ "ccx": "Northern Zhuang",
397
+ "cru": "Carútana",
398
+ "dat": "Darang Deng",
399
+ "dyk": "Land Dayak",
400
+ "eni": "Enim",
401
+ "fiz": "Izere",
402
+ "gen": "Geman Deng",
403
+ "ggh": "Garreh-Ajuran",
404
+ "itu": "Itutang",
405
+ "kds": "Lahu Shi",
406
+ "knh": "Kayan River Kenyah",
407
+ "krg": "North Korowai",
408
+ "krq": "Krui",
409
+ "kxg": "Katingan",
410
+ "lmt": "Lematang",
411
+ "lnt": "Lintang",
412
+ "lod": "Berawan",
413
+ "mbg": "Northern Nambikuára",
414
+ "mdo": "Southwest Gbaya",
415
+ "mhv": "Arakanese",
416
+ "miv": "Mimi",
417
+ "mqd": "Madang",
418
+ "nky": "Khiamniungan Naga",
419
+ "nxj": "Nyadu",
420
+ "ogn": "Ogan",
421
+ "ork": "Orokaiva",
422
+ "paj": "Ipeka-Tapuia",
423
+ "pec": "Southern Pesisir",
424
+ "pen": "Penesak",
425
+ "plm": "Palembang",
426
+ "poj": "Lower Pokomo",
427
+ "pun": "Pubian",
428
+ "rae": "Ranau",
429
+ "rjb": "Rajbanshi",
430
+ "rws": "Rawas",
431
+ "sdd": "Semendo",
432
+ "sdi": "Sindang Kelingi",
433
+ "skl": "Selako",
434
+ "slb": "Kahumamahon Saluan",
435
+ "srj": "Serawai",
436
+ "suf": "Tarpia",
437
+ "suh": "Suba",
438
+ "suu": "Sungkai",
439
+ "szk": "Sizaki",
440
+ "tle": "Southern Marakwet",
441
+ "tnj": "Tanjong",
442
+ "ttx": "Tutong 1",
443
+ "ubm": "Upper Baram Kenyah",
444
+ "vky": "Kayu Agung",
445
+ "vmo": "Muko-Muko",
446
+ "wre": "Ware",
447
+ "xah": "Kahayan",
448
+ "xkm": "Mahakam Kenyah",
449
+ "xuf": "Kunfal",
450
+ "yio": "Dayao Yi",
451
+ "ymj": "Muji Yi",
452
+ "ypl": "Pula Yi",
453
+ "ypw": "Puwa Yi",
454
+ "ywm": "Wumeng Yi",
455
+ "yym": "Yuanjiang-Mojiang Yi",
456
+ "mly": "Malay (individual language)",
457
+ "muw": "Mundari",
458
+ "xst": "Silt'e",
459
+ "ope": "Old Persian",
460
+ "scc": "Serbian",
461
+ "scr": "Croatian",
462
+ "xsk": "Sakan",
463
+ "mol": "Moldavian",
464
+ "aay": "Aariya",
465
+ "acc": "Cubulco Achí",
466
+ "cbm": "Yepocapa Southwestern Cakchiquel",
467
+ "chs": "Chumash",
468
+ "ckc": "Northern Cakchiquel",
469
+ "ckd": "South Central Cakchiquel",
470
+ "cke": "Eastern Cakchiquel",
471
+ "ckf": "Southern Cakchiquel",
472
+ "cki": "Santa María De Jesús Cakchiquel",
473
+ "ckj": "Santo Domingo Xenacoj Cakchiquel",
474
+ "ckk": "Acatenango Southwestern Cakchiquel",
475
+ "ckw": "Western Cakchiquel",
476
+ "cnm": "Ixtatán Chuj",
477
+ "cti": "Tila Chol",
478
+ "cun": "Cunén Quiché",
479
+ "eml": "Emiliano-Romagnolo",
480
+ "eur": "Europanto",
481
+ "gmo": "Gamo-Gofa-Dawro",
482
+ "hsf": "Southeastern Huastec",
483
+ "hva": "San Luís Potosí Huastec",
484
+ "ixi": "Nebaj Ixil",
485
+ "ixj": "Chajul Ixil",
486
+ "jai": "Western Jacalteco",
487
+ "mms": "Southern Mam",
488
+ "mpf": "Tajumulco Mam",
489
+ "mtz": "Tacanec",
490
+ "mvc": "Central Mam",
491
+ "mvj": "Todos Santos Cuchumatán Mam",
492
+ "poa": "Eastern Pokomam",
493
+ "pob": "Western Pokomchí",
494
+ "pou": "Southern Pokomam",
495
+ "ppv": "Papavô",
496
+ "quj": "Joyabaj Quiché",
497
+ "qut": "West Central Quiché",
498
+ "quu": "Eastern Quiché",
499
+ "qxi": "San Andrés Quiché",
500
+ "sic": "Malinguat",
501
+ "stc": "Santa Cruz",
502
+ "tlz": "Toala'",
503
+ "tzb": "Bachajón Tzeltal",
504
+ "tzc": "Chamula Tzotzil",
505
+ "tze": "Chenalhó Tzotzil",
506
+ "tzs": "San Andrés Larrainzar Tzotzil",
507
+ "tzt": "Western Tzutujil",
508
+ "tzu": "Huixtán Tzotzil",
509
+ "tzz": "Zinacantán Tzotzil",
510
+ "vlr": "Vatrata",
511
+ "yus": "Chan Santa Cruz Maya",
512
+ "nfg": "Nyeng",
513
+ "nfk": "Shakara",
514
+ "agp": "Paranan",
515
+ "bhk": "Albay Bicolano",
516
+ "bkb": "Finallig",
517
+ "btb": "Beti (Cameroon)",
518
+ "cjr": "Chorotega",
519
+ "cmk": "Chimakum",
520
+ "drh": "Darkhat",
521
+ "drw": "Darwazi",
522
+ "gav": "Gabutamon",
523
+ "mof": "Mohegan-Montauk-Narragansett",
524
+ "mst": "Cataelano Mandaya",
525
+ "myt": "Sangab Mandaya",
526
+ "rmr": "Caló",
527
+ "sgl": "Sanglechi-Ishkashimi",
528
+ "sul": "Surigaonon",
529
+ "sum": "Sumo-Mayangna",
530
+ "tnf": "Tangshewi",
531
+ "wgw": "Wagawaga",
532
+ "ayx": "Ayi (China)",
533
+ "bjq": "Southern Betsimisaraka Malagasy",
534
+ "dha": "Dhanwar (India)",
535
+ "dkl": "Kolum So Dogon",
536
+ "mja": "Mahei",
537
+ "nbf": "Naxi",
538
+ "noo": "Nootka",
539
+ "tie": "Tingal",
540
+ "tkk": "Takpa",
541
+ "baz": "Tunen",
542
+ "bjd": "Bandjigali",
543
+ "ccq": "Chaungtha",
544
+ "cka": "Khumi Awa Chin",
545
+ "dap": "Nisi (India)",
546
+ "dwl": "Walo Kumbe Dogon",
547
+ "elp": "Elpaputih",
548
+ "gbc": "Garawa",
549
+ "gio": "Gelao",
550
+ "hrr": "Horuru",
551
+ "ibi": "Ibilo",
552
+ "jar": "Jarawa (Nigeria)",
553
+ "kdv": "Kado",
554
+ "kgh": "Upper Tanudan Kalinga",
555
+ "kpp": "Paku Karen",
556
+ "kzh": "Kenuzi-Dongola",
557
+ "lcq": "Luhu",
558
+ "mgx": "Omati",
559
+ "nln": "Durango Nahuatl",
560
+ "pbz": "Palu",
561
+ "pgy": "Pongyong",
562
+ "sca": "Sansu",
563
+ "tlw": "South Wemale",
564
+ "unp": "Worora",
565
+ "wiw": "Wirangu",
566
+ "ybd": "Yangbye",
567
+ "yen": "Yendang",
568
+ "yma": "Yamphe",
569
+ "daf": "Dan",
570
+ "djl": "Djiwarli",
571
+ "ggr": "Aghu Tharnggalu",
572
+ "ilw": "Talur",
573
+ "izi": "Izi-Ezaa-Ikwo-Mgbo",
574
+ "meg": "Mea",
575
+ "mld": "Malakhel",
576
+ "mnt": "Maykulan",
577
+ "mwd": "Mudbura",
578
+ "myq": "Forest Maninka",
579
+ "nbx": "Ngura",
580
+ "nlr": "Ngarla",
581
+ "pcr": "Panang",
582
+ "ppr": "Piru",
583
+ "tgg": "Tangga",
584
+ "wit": "Wintu",
585
+ "xia": "Xiandao",
586
+ "yiy": "Yir Yoront",
587
+ "yos": "Yos",
588
+ "emo": "Emok",
589
+ "ggm": "Gugu Mini",
590
+ "leg": "Lengua",
591
+ "lmm": "Lamam",
592
+ "mhh": "Maskoy Pidgin",
593
+ "puz": "Purum Naga",
594
+ "sap": "Sanapaná",
595
+ "yuu": "Yugh",
596
+ "aam": "Aramanik",
597
+ "adp": "Adap",
598
+ "aue": "ǂKxʼauǁʼein",
599
+ "bmy": "Bemba (Democratic Republic of Congo)",
600
+ "bxx": "Borna (Democratic Republic of Congo)",
601
+ "byy": "Buya",
602
+ "dzd": "Daza",
603
+ "gfx": "Mangetti Dune ǃXung",
604
+ "gti": "Gbati-ri",
605
+ "ime": "Imeraguen",
606
+ "kbf": "Kakauhua",
607
+ "koj": "Sara Dunjo",
608
+ "kwq": "Kwak",
609
+ "kxe": "Kakihum",
610
+ "lii": "Lingkhim",
611
+ "mwj": "Maligo",
612
+ "nnx": "Ngong",
613
+ "oun": "ǃOǃung",
614
+ "pmu": "Mirpur Panjabi",
615
+ "sgo": "Songa",
616
+ "thx": "The",
617
+ "tsf": "Southwestern Tamang",
618
+ "uok": "Uokha",
619
+ "xsj": "Subi",
620
+ "yds": "Yiddish Sign Language",
621
+ "ymt": "Mator-Taygi-Karagas",
622
+ "ynh": "Yangho",
623
+ "bgm": "Baga Mboteni",
624
+ "btl": "Bhatola",
625
+ "cbe": "Chipiajes",
626
+ "cbh": "Cagua",
627
+ "coy": "Coyaima",
628
+ "cqu": "Chilean Quechua",
629
+ "cum": "Cumeral",
630
+ "duj": "Dhuwal",
631
+ "ggn": "Eastern Gurung",
632
+ "ggo": "Southern Gondi",
633
+ "guv": "Gey",
634
+ "iap": "Iapama",
635
+ "ill": "Iranun",
636
+ "kgc": "Kasseng",
637
+ "kox": "Coxima",
638
+ "ktr": "Kota Marudu Tinagas",
639
+ "kvs": "Kunggara",
640
+ "kzj": "Coastal Kadazan",
641
+ "kzt": "Tambunan Dusun",
642
+ "nad": "Nijadali",
643
+ "nts": "Natagaimas",
644
+ "ome": "Omejes",
645
+ "pmc": "Palumata",
646
+ "pod": "Ponares",
647
+ "ppa": "Pao",
648
+ "pry": "Pray 3",
649
+ "rna": "Runa",
650
+ "svr": "Savara",
651
+ "tdu": "Tempasuk Dusun",
652
+ "thc": "Tai Hang Tong",
653
+ "tid": "Tidong",
654
+ "tmp": "Tai Mène",
655
+ "tne": "Tinoc Kallahan",
656
+ "toe": "Tomedes",
657
+ "xba": "Kamba (Brazil)",
658
+ "xbx": "Kabixí",
659
+ "xip": "Xipináwa",
660
+ "xkh": "Karahawyana",
661
+ "yri": "Yarí",
662
+ "jeg": "Jeng",
663
+ "kgd": "Kataang",
664
+ "krm": "Krim",
665
+ "prb": "Lua'",
666
+ "puk": "Pu Ko",
667
+ "rie": "Rien",
668
+ "rsi": "Rennellese Sign Language",
669
+ "skk": "Sok",
670
+ "snh": "Shinabo",
671
+ "lsg": "Lyons Sign Language",
672
+ "mwx": "Mediak",
673
+ "mwy": "Mosiro",
674
+ "ncp": "Ndaktup",
675
+ "ais": "Nataoran Amis",
676
+ "asd": "Asas",
677
+ "dit": "Dirari",
678
+ "dud": "Hun-Saare",
679
+ "lba": "Lui",
680
+ "llo": "Khlor",
681
+ "myd": "Maramba",
682
+ "myi": "Mina (India)",
683
+ "nns": "Ningye",
684
+ "aoh": "Arma",
685
+ "ayy": "Tayabas Ayta",
686
+ "bbz": "Babalia Creole Arabic",
687
+ "bpb": "Barbacoas",
688
+ "cca": "Cauca",
689
+ "cdg": "Chamari",
690
+ "dgu": "Degaru",
691
+ "drr": "Dororo",
692
+ "ekc": "Eastern Karnic",
693
+ "gli": "Guliguli",
694
+ "kjf": "Khalaj",
695
+ "kxl": "Nepali Kurux",
696
+ "kxu": "Kui (India)",
697
+ "lmz": "Lumbee",
698
+ "nxu": "Narau",
699
+ "plp": "Palpa",
700
+ "sdm": "Semandang",
701
+ "tbb": "Tapeba",
702
+ "xrq": "Karranga",
703
+ "xtz": "Tasmanian",
704
+ "zir": "Ziriya",
705
+ "thw": "Thudam",
706
+ "bic": "Bikaru",
707
+ "bij": "Vaghat-Ya-Bijim-Legeri",
708
+ "blg": "Balau",
709
+ "gji": "Geji",
710
+ "mvm": "Muya",
711
+ "ngo": "Ngoni",
712
+ "pat": "Papitalai",
713
+ "vki": "Ija-Zuba",
714
+ "wra": "Warapu",
715
+ "ajt": "Judeo-Tunisian Arabic",
716
+ "cug": "Chungmboko",
717
+ "lak": "Laka (Nigeria)",
718
+ "lno": "Lango (South Sudan)",
719
+ "pii": "Pini",
720
+ "smd": "Sama",
721
+ "snb": "Sebuyau",
722
+ "uun": "Kulon-Pazeh",
723
+ "wrd": "Warduji",
724
+ "wya": "Wyandot",
725
+ }
726
+
727
+
728
+ iso639long = inverse_dict(iso639short)
729
+
730
+ iso639code_retired = inverse_dict(iso639retired)
llmeval-env/lib/python3.10/site-packages/nltk/lazyimport.py ADDED
@@ -0,0 +1,142 @@
1
+ # This module is from mx/DateTime/LazyModule.py and is
2
+ # distributed under the terms of the eGenix.com Public License Agreement
3
+ # https://www.egenix.com/products/eGenix.com-Public-License-1.1.0.pdf
4
+
5
+ """ Helper to enable simple lazy module import.
6
+
7
+ 'Lazy' means the actual import is deferred until an attribute is
8
+ requested from the module's namespace. This has the advantage of
9
+ allowing all imports to be done at the top of a script (in a
10
+ prominent and visible place) without having a great impact
11
+ on startup time.
12
+
13
+ Copyright (c) 1999-2005, Marc-Andre Lemburg; mailto:[email protected]
14
+ See the documentation for further information on copyrights,
15
+ or contact the author. All Rights Reserved.
16
+ """
17
+
18
+ ### Constants
19
+
20
+ _debug = 0
21
+
22
+ ###
23
+
24
+
25
+ class LazyModule:
26
+
27
+ """Lazy module class.
28
+
29
+ Lazy modules are imported into the given namespaces whenever a
30
+ non-special attribute (there are some attributes like __doc__
31
+ that class instances handle without calling __getattr__) is
32
+ requested. The module is then registered under the given name
33
+ in locals usually replacing the import wrapper instance. The
34
+ import itself is done using globals as global namespace.
35
+
36
+ Example of creating a lazy load module:
37
+
38
+ ISO = LazyModule('ISO',locals(),globals())
39
+
40
+ Later, requesting an attribute from ISO will load the module
41
+ automatically into the locals() namespace, overriding the
42
+ LazyModule instance:
43
+
44
+ t = ISO.Week(1998,1,1)
45
+
46
+ """
47
+
48
+ # Flag which indicates whether the LazyModule is initialized or not
49
+ __lazymodule_init = 0
50
+
51
+ # Name of the module to load
52
+ __lazymodule_name = ""
53
+
54
+ # Flag which indicates whether the module was loaded or not
55
+ __lazymodule_loaded = 0
56
+
57
+ # Locals dictionary where to register the module
58
+ __lazymodule_locals = None
59
+
60
+ # Globals dictionary to use for the module import
61
+ __lazymodule_globals = None
62
+
63
+ def __init__(self, name, locals, globals=None):
64
+
65
+ """Create a LazyModule instance wrapping module name.
66
+
67
+ The module will later on be registered in locals under the
68
+ given module name.
69
+
70
+ globals is optional and defaults to locals.
71
+
72
+ """
73
+ self.__lazymodule_locals = locals
74
+ if globals is None:
75
+ globals = locals
76
+ self.__lazymodule_globals = globals
77
+ mainname = globals.get("__name__", "")
78
+ if mainname:
79
+ self.__name__ = mainname + "." + name
80
+ self.__lazymodule_name = name
81
+ else:
82
+ self.__name__ = self.__lazymodule_name = name
83
+ self.__lazymodule_init = 1
84
+
85
+ def __lazymodule_import(self):
86
+
87
+ """Import the module now."""
88
+ # Load and register module
89
+ local_name = self.__lazymodule_name # e.g. "toolbox"
90
+ full_name = self.__name__ # e.g. "nltk.toolbox"
91
+ if self.__lazymodule_loaded:
92
+ return self.__lazymodule_locals[local_name]
93
+ if _debug:
94
+ print("LazyModule: Loading module %r" % full_name)
95
+ self.__lazymodule_locals[local_name] = module = __import__(
96
+ full_name, self.__lazymodule_locals, self.__lazymodule_globals, "*"
97
+ )
98
+
99
+ # Fill namespace with all symbols from original module to
100
+ # provide faster access.
101
+ self.__dict__.update(module.__dict__)
102
+
103
+ # Set import flag
104
+ self.__dict__["__lazymodule_loaded"] = 1
105
+
106
+ if _debug:
107
+ print("LazyModule: Module %r loaded" % full_name)
108
+ return module
109
+
110
+ def __getattr__(self, name):
111
+
112
+ """Import the module on demand and get the attribute."""
113
+ if self.__lazymodule_loaded:
114
+ raise AttributeError(name)
115
+ if _debug:
116
+ print(
117
+ "LazyModule: "
118
+ "Module load triggered by attribute %r read access" % name
119
+ )
120
+ module = self.__lazymodule_import()
121
+ return getattr(module, name)
122
+
123
+ def __setattr__(self, name, value):
124
+
125
+ """Import the module on demand and set the attribute."""
126
+ if not self.__lazymodule_init:
127
+ self.__dict__[name] = value
128
+ return
129
+ if self.__lazymodule_loaded:
130
+ self.__lazymodule_locals[self.__lazymodule_name] = value
131
+ self.__dict__[name] = value
132
+ return
133
+ if _debug:
134
+ print(
135
+ "LazyModule: "
136
+ "Module load triggered by attribute %r write access" % name
137
+ )
138
+ module = self.__lazymodule_import()
139
+ setattr(module, name, value)
140
+
141
+ def __repr__(self):
142
+ return "<LazyModule '%s'>" % self.__name__
llmeval-env/lib/python3.10/site-packages/nltk/probability.py ADDED
@@ -0,0 +1,2578 @@
1
+ # Natural Language Toolkit: Probability and Statistics
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]> (additions)
6
+ # Trevor Cohn <[email protected]> (additions)
7
+ # Peter Ljunglöf <[email protected]> (additions)
8
+ # Liang Dong <[email protected]> (additions)
9
+ # Geoffrey Sampson <[email protected]> (additions)
10
+ # Ilia Kurenkov <[email protected]> (additions)
11
+ #
12
+ # URL: <https://www.nltk.org/>
13
+ # For license information, see LICENSE.TXT
14
+
15
+ """
16
+ Classes for representing and processing probabilistic information.
17
+
18
+ The ``FreqDist`` class is used to encode "frequency distributions",
19
+ which count the number of times that each outcome of an experiment
20
+ occurs.
21
+
22
+ The ``ProbDistI`` class defines a standard interface for "probability
23
+ distributions", which encode the probability of each outcome for an
24
+ experiment. There are two types of probability distribution:
25
+
26
+ - "derived probability distributions" are created from frequency
27
+ distributions. They attempt to model the probability distribution
28
+ that generated the frequency distribution.
29
+ - "analytic probability distributions" are created directly from
30
+ parameters (such as variance).
31
+
32
+ The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
33
+ are used to encode conditional distributions. Conditional probability
34
+ distributions can be derived or analytic; but currently the only
35
+ implementation of the ``ConditionalProbDistI`` interface is
36
+ ``ConditionalProbDist``, a derived distribution.
37
+
38
+ """
39
+
40
+ import array
41
+ import math
42
+ import random
43
+ import warnings
44
+ from abc import ABCMeta, abstractmethod
45
+ from collections import Counter, defaultdict
46
+ from functools import reduce
47
+
48
+ from nltk.internals import raise_unorderable_types
49
+
50
+ _NINF = float("-1e300")
51
+
52
+ ##//////////////////////////////////////////////////////
53
+ ## Frequency Distributions
54
+ ##//////////////////////////////////////////////////////
55
+
56
+
57
+ class FreqDist(Counter):
58
+ """
59
+ A frequency distribution for the outcomes of an experiment. A
60
+ frequency distribution records the number of times each outcome of
61
+ an experiment has occurred. For example, a frequency distribution
62
+ could be used to record the frequency of each word type in a
63
+ document. Formally, a frequency distribution can be defined as a
64
+ function mapping from each sample to the number of times that
65
+ sample occurred as an outcome.
66
+
67
+ Frequency distributions are generally constructed by running a
68
+ number of experiments, and incrementing the count for a sample
69
+ every time it is an outcome of an experiment. For example, the
70
+ following code will produce a frequency distribution that encodes
71
+ how often each word occurs in a text:
72
+
73
+ >>> from nltk.tokenize import word_tokenize
74
+ >>> from nltk.probability import FreqDist
75
+ >>> sent = 'This is an example sentence'
76
+ >>> fdist = FreqDist()
77
+ >>> for word in word_tokenize(sent):
78
+ ... fdist[word.lower()] += 1
79
+
80
+ An equivalent way to do this is with the initializer:
81
+
82
+ >>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))
83
+
84
+ """
85
+
86
+ def __init__(self, samples=None):
87
+ """
88
+ Construct a new frequency distribution. If ``samples`` is
89
+ given, then the frequency distribution will be initialized
90
+ with the count of each object in ``samples``; otherwise, it
91
+ will be initialized to be empty.
92
+
93
+ In particular, ``FreqDist()`` returns an empty frequency
94
+ distribution; and ``FreqDist(samples)`` first creates an empty
95
+ frequency distribution, and then calls ``update`` with the
96
+ list ``samples``.
97
+
98
+ :param samples: The samples to initialize the frequency
99
+ distribution with.
100
+ :type samples: Sequence
101
+ """
102
+ Counter.__init__(self, samples)
103
+
104
+ # Cached number of samples in this FreqDist
105
+ self._N = None
106
+
107
+ def N(self):
108
+ """
109
+ Return the total number of sample outcomes that have been
110
+ recorded by this FreqDist. For the number of unique
111
+ sample values (or bins) with counts greater than zero, use
112
+ ``FreqDist.B()``.
113
+
114
+ :rtype: int
115
+ """
116
+ if self._N is None:
117
+ # Not already cached, or cache has been invalidated
118
+ self._N = sum(self.values())
119
+ return self._N
120
+
121
+ def __setitem__(self, key, val):
122
+ """
123
+ Override ``Counter.__setitem__()`` to invalidate the cached N
124
+ """
125
+ self._N = None
126
+ super().__setitem__(key, val)
127
+
128
+ def __delitem__(self, key):
129
+ """
130
+ Override ``Counter.__delitem__()`` to invalidate the cached N
131
+ """
132
+ self._N = None
133
+ super().__delitem__(key)
134
+
135
+ def update(self, *args, **kwargs):
136
+ """
137
+ Override ``Counter.update()`` to invalidate the cached N
138
+ """
139
+ self._N = None
140
+ super().update(*args, **kwargs)
141
+
142
+ def setdefault(self, key, val):
143
+ """
144
+ Override ``Counter.setdefault()`` to invalidate the cached N
145
+ """
146
+ self._N = None
147
+ super().setdefault(key, val)
148
+
149
+ def B(self):
150
+ """
151
+ Return the total number of sample values (or "bins") that
152
+ have counts greater than zero. For the total
153
+ number of sample outcomes recorded, use ``FreqDist.N()``.
154
+ (FreqDist.B() is the same as len(FreqDist).)
155
+
156
+ :rtype: int
157
+ """
158
+ return len(self)
159
+
160
+ def hapaxes(self):
161
+ """
162
+ Return a list of all samples that occur once (hapax legomena)
163
+
164
+ :rtype: list
165
+ """
166
+ return [item for item in self if self[item] == 1]
167
+
168
+ def Nr(self, r, bins=None):
169
+ return self.r_Nr(bins)[r]
170
+
171
+ def r_Nr(self, bins=None):
172
+ """
173
+ Return the dictionary mapping r to Nr, the number of samples with frequency r, where Nr > 0.
174
+
175
+ :type bins: int
176
+ :param bins: The number of possible sample outcomes. ``bins``
177
+ is used to calculate Nr(0). In particular, Nr(0) is
178
+ ``bins-self.B()``. If ``bins`` is not specified, it
179
+ defaults to ``self.B()`` (so Nr(0) will be 0).
180
+ :rtype: dict
181
+ """
182
+
183
+ _r_Nr = defaultdict(int)
184
+ for count in self.values():
185
+ _r_Nr[count] += 1
186
+
187
+ # Special case for Nr[0]:
188
+ _r_Nr[0] = bins - self.B() if bins is not None else 0
189
+
190
+ return _r_Nr
191
+
192
+ def _cumulative_frequencies(self, samples):
193
+ """
194
+ Return the cumulative frequencies of the specified samples.
195
+ If no samples are specified, all counts are returned, starting
196
+ with the largest.
197
+
198
+ :param samples: the samples whose frequencies should be returned.
199
+ :type samples: any
200
+ :rtype: list(float)
201
+ """
202
+ cf = 0.0
203
+ for sample in samples:
204
+ cf += self[sample]
205
+ yield cf
206
+
207
+ # slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs,
208
+ # here, freq() does probs
209
+ def freq(self, sample):
210
+ """
211
+ Return the frequency of a given sample. The frequency of a
212
+ sample is defined as the count of that sample divided by the
213
+ total number of sample outcomes that have been recorded by
214
+ this FreqDist. The count of a sample is defined as the
215
+ number of times that sample outcome was recorded by this
216
+ FreqDist. Frequencies are always real numbers in the range
217
+ [0, 1].
218
+
219
+ :param sample: the sample whose frequency
220
+ should be returned.
221
+ :type sample: any
222
+ :rtype: float
223
+ """
224
+ n = self.N()
225
+ if n == 0:
226
+ return 0
227
+ return self[sample] / n
228
+
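To make the counting terminology concrete (N() counts outcomes, B() counts bins, freq() is count divided by N), a tiny example that avoids any corpus or tokenizer dependency by splitting on whitespace:

    from nltk.probability import FreqDist

    fdist = FreqDist("the cat sat on the mat".split())
    print(fdist.N())          # 6 sample outcomes in total
    print(fdist.B())          # 5 distinct samples (bins)
    print(fdist["the"])       # 2
    print(fdist.freq("the"))  # 0.3333... (= 2 / 6)
    print(fdist.hapaxes())    # ['cat', 'sat', 'on', 'mat']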
229
+ def max(self):
230
+ """
231
+ Return the sample with the greatest number of outcomes in this
232
+ frequency distribution. If two or more samples have the same
233
+ number of outcomes, return one of them; which sample is
234
+ returned is undefined. If no outcomes have occurred in this
235
+ frequency distribution, return None.
236
+
237
+ :return: The sample with the maximum number of outcomes in this
238
+ frequency distribution.
239
+ :rtype: any or None
240
+ """
241
+ if len(self) == 0:
242
+ raise ValueError(
243
+ "A FreqDist must have at least one sample before max is defined."
244
+ )
245
+ return self.most_common(1)[0][0]
246
+
247
+ def plot(
248
+ self, *args, title="", cumulative=False, percents=False, show=True, **kwargs
249
+ ):
250
+ """
251
+ Plot samples from the frequency distribution
252
+ displaying the most frequent sample first. If an integer
253
+ parameter is supplied, stop after this many samples have been
254
+ plotted. For a cumulative plot, specify cumulative=True. Additional
255
+ ``**kwargs`` are passed to matplotlib's plot function.
256
+ (Requires Matplotlib to be installed.)
257
+
258
+ :param title: The title for the graph.
259
+ :type title: str
260
+ :param cumulative: Whether the plot is cumulative. (default = False)
261
+ :type cumulative: bool
262
+ :param percents: Whether the plot uses percents instead of counts. (default = False)
263
+ :type percents: bool
264
+ :param show: Whether to show the plot, or only return the ax.
265
+ :type show: bool
266
+ """
267
+ try:
268
+ import matplotlib.pyplot as plt
269
+ except ImportError as e:
270
+ raise ValueError(
271
+ "The plot function requires matplotlib to be installed."
272
+ "See https://matplotlib.org/"
273
+ ) from e
274
+
275
+ if len(args) == 0:
276
+ args = [len(self)]
277
+ samples = [item for item, _ in self.most_common(*args)]
278
+
279
+ if cumulative:
280
+ freqs = list(self._cumulative_frequencies(samples))
281
+ ylabel = "Cumulative "
282
+ else:
283
+ freqs = [self[sample] for sample in samples]
284
+ ylabel = ""
285
+
286
+ if percents:
287
+ freqs = [f / self.N() * 100 for f in freqs]
288
+ ylabel += "Percents"
289
+ else:
290
+ ylabel += "Counts"
291
+
292
+ ax = plt.gca()
293
+ ax.grid(True, color="silver")
294
+
295
+ if "linewidth" not in kwargs:
296
+ kwargs["linewidth"] = 2
297
+ if title:
298
+ ax.set_title(title)
299
+
300
+ ax.plot(freqs, **kwargs)
301
+ ax.set_xticks(range(len(samples)))
302
+ ax.set_xticklabels([str(s) for s in samples], rotation=90)
303
+ ax.set_xlabel("Samples")
304
+ ax.set_ylabel(ylabel)
305
+
306
+ if show:
307
+ plt.show()
308
+
309
+ return ax
310
+
311
+ def tabulate(self, *args, **kwargs):
312
+ """
313
+ Tabulate the given samples from the frequency distribution (cumulative),
314
+ displaying the most frequent sample first. If an integer
315
+ parameter is supplied, stop after this many samples have been
316
+ tabulated.
317
+
318
+ :param samples: The samples to tabulate (default is all samples)
319
+ :type samples: list
320
+ :param cumulative: A flag to specify whether the freqs are cumulative (default = False)
321
+ :type cumulative: bool
322
+ """
323
+ if len(args) == 0:
324
+ args = [len(self)]
325
+ samples = _get_kwarg(
326
+ kwargs, "samples", [item for item, _ in self.most_common(*args)]
327
+ )
328
+
329
+ cumulative = _get_kwarg(kwargs, "cumulative", False)
330
+ if cumulative:
331
+ freqs = list(self._cumulative_frequencies(samples))
332
+ else:
333
+ freqs = [self[sample] for sample in samples]
334
+ # percents = [f * 100 for f in freqs] only in ProbDist?
335
+
336
+ width = max(len(f"{s}") for s in samples)
337
+ width = max(width, max(len("%d" % f) for f in freqs))
338
+
339
+ for i in range(len(samples)):
340
+ print("%*s" % (width, samples[i]), end=" ")
341
+ print()
342
+ for i in range(len(samples)):
343
+ print("%*d" % (width, freqs[i]), end=" ")
344
+ print()
345
+
346
+ def copy(self):
347
+ """
348
+ Create a copy of this frequency distribution.
349
+
350
+ :rtype: FreqDist
351
+ """
352
+ return self.__class__(self)
353
+
354
+ # Mathematical operators
355
+
356
+ def __add__(self, other):
357
+ """
358
+ Add counts from two counters.
359
+
360
+ >>> FreqDist('abbb') + FreqDist('bcc')
361
+ FreqDist({'b': 4, 'c': 2, 'a': 1})
362
+
363
+ """
364
+ return self.__class__(super().__add__(other))
365
+
366
+ def __sub__(self, other):
367
+ """
368
+ Subtract count, but keep only results with positive counts.
369
+
370
+ >>> FreqDist('abbbc') - FreqDist('bccd')
371
+ FreqDist({'b': 2, 'a': 1})
372
+
373
+ """
374
+ return self.__class__(super().__sub__(other))
375
+
376
+ def __or__(self, other):
377
+ """
378
+ Union is the maximum of value in either of the input counters.
379
+
380
+ >>> FreqDist('abbb') | FreqDist('bcc')
381
+ FreqDist({'b': 3, 'c': 2, 'a': 1})
382
+
383
+ """
384
+ return self.__class__(super().__or__(other))
385
+
386
+ def __and__(self, other):
387
+ """
388
+ Intersection is the minimum of corresponding counts.
389
+
390
+ >>> FreqDist('abbb') & FreqDist('bcc')
391
+ FreqDist({'b': 1})
392
+
393
+ """
394
+ return self.__class__(super().__and__(other))
395
+
396
+ def __le__(self, other):
397
+ """
398
+ Returns True if this frequency distribution is a subset of the other,
399
+ i.e. if for every key the count does not exceed the count of the
400
+ same key in the other frequency distribution.
401
+
402
+ The <= operator forms a partial order, satisfying the axioms of
403
+ reflexivity, antisymmetry and transitivity.
404
+
405
+ >>> FreqDist('a') <= FreqDist('a')
406
+ True
407
+ >>> a = FreqDist('abc')
408
+ >>> b = FreqDist('aabc')
409
+ >>> (a <= b, b <= a)
410
+ (True, False)
411
+ >>> FreqDist('a') <= FreqDist('abcd')
412
+ True
413
+ >>> FreqDist('abc') <= FreqDist('xyz')
414
+ False
415
+ >>> FreqDist('xyz') <= FreqDist('abc')
416
+ False
417
+ >>> c = FreqDist('a')
418
+ >>> d = FreqDist('aa')
419
+ >>> e = FreqDist('aaa')
420
+ >>> c <= d and d <= e and c <= e
421
+ True
422
+ """
423
+ if not isinstance(other, FreqDist):
424
+ raise_unorderable_types("<=", self, other)
425
+ return set(self).issubset(other) and all(
426
+ self[key] <= other[key] for key in self
427
+ )
428
+
429
+ def __ge__(self, other):
430
+ if not isinstance(other, FreqDist):
431
+ raise_unorderable_types(">=", self, other)
432
+ return set(self).issuperset(other) and all(
433
+ self[key] >= other[key] for key in other
434
+ )
435
+
436
+ __lt__ = lambda self, other: self <= other and not self == other
437
+ __gt__ = lambda self, other: self >= other and not self == other
438
+
439
+ def __repr__(self):
440
+ """
441
+ Return a string representation of this FreqDist.
442
+
443
+ :rtype: string
444
+ """
445
+ return self.pformat()
446
+
447
+ def pprint(self, maxlen=10, stream=None):
448
+ """
449
+ Print a string representation of this FreqDist to 'stream'
450
+
451
+ :param maxlen: The maximum number of items to print
452
+ :type maxlen: int
453
+ :param stream: The stream to print to. stdout by default
454
+ """
455
+ print(self.pformat(maxlen=maxlen), file=stream)
456
+
457
+ def pformat(self, maxlen=10):
458
+ """
459
+ Return a string representation of this FreqDist.
460
+
461
+ :param maxlen: The maximum number of items to display
462
+ :type maxlen: int
463
+ :rtype: string
464
+ """
465
+ items = ["{!r}: {!r}".format(*item) for item in self.most_common(maxlen)]
466
+ if len(self) > maxlen:
467
+ items.append("...")
468
+ return "FreqDist({{{0}}})".format(", ".join(items))
469
+
470
+ def __str__(self):
471
+ """
472
+ Return a string representation of this FreqDist.
473
+
474
+ :rtype: string
475
+ """
476
+ return "<FreqDist with %d samples and %d outcomes>" % (len(self), self.N())
477
+
478
+ def __iter__(self):
479
+ """
480
+ Return an iterator which yields tokens ordered by frequency.
481
+
482
+ :rtype: iterator
483
+ """
484
+ for token, _ in self.most_common(self.B()):
485
+ yield token
486
+
487
+
488
+ ##//////////////////////////////////////////////////////
489
+ ## Probability Distributions
490
+ ##//////////////////////////////////////////////////////
491
+
492
+
493
+ class ProbDistI(metaclass=ABCMeta):
494
+ """
495
+ A probability distribution for the outcomes of an experiment. A
496
+ probability distribution specifies how likely it is that an
497
+ experiment will have any given outcome. For example, a
498
+ probability distribution could be used to predict the probability
499
+ that a token in a document will have a given type. Formally, a
500
+ probability distribution can be defined as a function mapping from
501
+ samples to nonnegative real numbers, such that the sum of every
502
+ number in the function's range is 1.0. A ``ProbDist`` is often
503
+ used to model the probability distribution of the experiment used
504
+ to generate a frequency distribution.
505
+ """
506
+
507
+ SUM_TO_ONE = True
508
+ """True if the probabilities of the samples in this probability
509
+ distribution will always sum to one."""
510
+
511
+ @abstractmethod
512
+ def __init__(self):
513
+ """
514
+ Classes inheriting from ProbDistI should implement __init__.
515
+ """
516
+
517
+ @abstractmethod
518
+ def prob(self, sample):
519
+ """
520
+ Return the probability for a given sample. Probabilities
521
+ are always real numbers in the range [0, 1].
522
+
523
+ :param sample: The sample whose probability
524
+ should be returned.
525
+ :type sample: any
526
+ :rtype: float
527
+ """
528
+
529
+ def logprob(self, sample):
530
+ """
531
+ Return the base 2 logarithm of the probability for a given sample.
532
+
533
+ :param sample: The sample whose probability
534
+ should be returned.
535
+ :type sample: any
536
+ :rtype: float
537
+ """
538
+ # Default definition, in terms of prob()
539
+ p = self.prob(sample)
540
+ return math.log(p, 2) if p != 0 else _NINF
541
+
542
+ @abstractmethod
543
+ def max(self):
544
+ """
545
+ Return the sample with the greatest probability. If two or
546
+ more samples have the same probability, return one of them;
547
+ which sample is returned is undefined.
548
+
549
+ :rtype: any
550
+ """
551
+
552
+ @abstractmethod
553
+ def samples(self):
554
+ """
555
+ Return a list of all samples that have nonzero probabilities.
556
+ Use ``prob`` to find the probability of each sample.
557
+
558
+ :rtype: list
559
+ """
560
+
561
+ # cf self.SUM_TO_ONE
562
+ def discount(self):
563
+ """
564
+ Return the ratio by which counts are discounted on average: c*/c
565
+
566
+ :rtype: float
567
+ """
568
+ return 0.0
569
+
570
+ # Subclasses should define more efficient implementations of this,
571
+ # where possible.
572
+ def generate(self):
573
+ """
574
+ Return a randomly selected sample from this probability distribution.
575
+ The probability of returning each sample ``samp`` is equal to
576
+ ``self.prob(samp)``.
577
+ """
578
+ p = random.random()
579
+ p_init = p
580
+ for sample in self.samples():
581
+ p -= self.prob(sample)
582
+ if p <= 0:
583
+ return sample
584
+ # allow for some rounding error:
585
+ if p < 0.0001:
586
+ return sample
587
+ # we *should* never get here
588
+ if self.SUM_TO_ONE:
589
+ warnings.warn(
590
+ "Probability distribution %r sums to %r; generate()"
591
+ " is returning an arbitrary sample." % (self, p_init - p)
592
+ )
593
+ return random.choice(list(self.samples()))
594
+
595
+
596
+ class UniformProbDist(ProbDistI):
597
+ """
598
+ A probability distribution that assigns equal probability to each
599
+ sample in a given set; and a zero probability to all other
600
+ samples.
601
+ """
602
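+ # Illustrative sketch: with four samples each receives probability 1/4, and any
+ # sample outside the set receives 0.
+ # >>> ud = UniformProbDist(['a', 'b', 'c', 'd'])
+ # >>> ud.prob('a'), ud.prob('z')
+ # (0.25, 0)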
+
603
+ def __init__(self, samples):
604
+ """
605
+ Construct a new uniform probability distribution, that assigns
606
+ equal probability to each sample in ``samples``.
607
+
608
+ :param samples: The samples that should be given uniform
609
+ probability.
610
+ :type samples: list
611
+ :raise ValueError: If ``samples`` is empty.
612
+ """
613
+ if len(samples) == 0:
614
+ raise ValueError(
615
+ "A Uniform probability distribution must " + "have at least one sample."
616
+ )
617
+ self._sampleset = set(samples)
618
+ self._prob = 1.0 / len(self._sampleset)
619
+ self._samples = list(self._sampleset)
620
+
621
+ def prob(self, sample):
622
+ return self._prob if sample in self._sampleset else 0
623
+
624
+ def max(self):
625
+ return self._samples[0]
626
+
627
+ def samples(self):
628
+ return self._samples
629
+
630
+ def __repr__(self):
631
+ return "<UniformProbDist with %d samples>" % len(self._sampleset)
632
+
633
+
634
+ class RandomProbDist(ProbDistI):
635
+ """
636
+ Generates a random probability distribution: each sample is assigned a
637
+ weight drawn uniformly at random from [0, 1] (a continuous uniform
638
+ distribution), and the weights are then normalized to sum to one.
639
+ """
640
+
641
+ def __init__(self, samples):
642
+ if len(samples) == 0:
643
+ raise ValueError(
644
+ "A probability distribution must " + "have at least one sample."
645
+ )
646
+ self._probs = self.unirand(samples)
647
+ self._samples = list(self._probs.keys())
648
+
649
+ @classmethod
650
+ def unirand(cls, samples):
651
+ """
652
+ The key function that creates a randomized initial distribution
653
+ that still sums to 1. Set as a dictionary of prob values so that
654
+ it can still be passed to MutableProbDist and called with identical
655
+ syntax to UniformProbDist
656
+ """
657
+ samples = set(samples)
658
+ randrow = [random.random() for i in range(len(samples))]
659
+ total = sum(randrow)
660
+ for i, x in enumerate(randrow):
661
+ randrow[i] = x / total
662
+
663
+ total = sum(randrow)
664
+ if total != 1:
665
+ # this difference, if present, is so small (mere rounding error) that it
666
+ # can be subtracted from any element without pushing a probability outside (0, 1)
667
+ randrow[-1] -= total - 1
668
+
669
+ return {s: randrow[i] for i, s in enumerate(samples)}
670
+
671
+ def max(self):
672
+ if not hasattr(self, "_max"):
673
+ self._max = max((p, v) for (v, p) in self._probs.items())[1]
674
+ return self._max
675
+
676
+ def prob(self, sample):
677
+ return self._probs.get(sample, 0)
678
+
679
+ def samples(self):
680
+ return self._samples
681
+
682
+ def __repr__(self):
683
+ return "<RandomUniformProbDist with %d samples>" % len(self._probs)
684
+
685
+
686
+ class DictionaryProbDist(ProbDistI):
687
+ """
688
+ A probability distribution whose probabilities are directly
689
+ specified by a given dictionary. The given dictionary maps
690
+ samples to probabilities.
691
+ """
692
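+ # Illustrative sketch: with normalize=True the raw weights 2, 1, 1 are rescaled
+ # by 1/4 so that they sum to one.
+ # >>> dpd = DictionaryProbDist({'a': 2, 'b': 1, 'c': 1}, normalize=True)
+ # >>> dpd.prob('a')
+ # 0.5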
+
693
+ def __init__(self, prob_dict=None, log=False, normalize=False):
694
+ """
695
+ Construct a new probability distribution from the given
696
+ dictionary, which maps values to probabilities (or to log
697
+ probabilities, if ``log`` is true). If ``normalize`` is
698
+ true, then the probability values are scaled by a constant
699
+ factor such that they sum to 1.
700
+
701
+ If called without arguments, the resulting probability
702
+ distribution assigns zero probability to all values.
703
+ """
704
+
705
+ self._prob_dict = prob_dict.copy() if prob_dict is not None else {}
706
+ self._log = log
707
+
708
+ # Normalize the distribution, if requested.
709
+ if normalize:
710
+ if len(prob_dict) == 0:
711
+ raise ValueError(
712
+ "A DictionaryProbDist must have at least one sample "
713
+ + "before it can be normalized."
714
+ )
715
+ if log:
716
+ value_sum = sum_logs(list(self._prob_dict.values()))
717
+ if value_sum <= _NINF:
718
+ logp = math.log(1.0 / len(prob_dict), 2)
719
+ for x in prob_dict:
720
+ self._prob_dict[x] = logp
721
+ else:
722
+ for (x, p) in self._prob_dict.items():
723
+ self._prob_dict[x] -= value_sum
724
+ else:
725
+ value_sum = sum(self._prob_dict.values())
726
+ if value_sum == 0:
727
+ p = 1.0 / len(prob_dict)
728
+ for x in prob_dict:
729
+ self._prob_dict[x] = p
730
+ else:
731
+ norm_factor = 1.0 / value_sum
732
+ for (x, p) in self._prob_dict.items():
733
+ self._prob_dict[x] *= norm_factor
734
+
735
+ def prob(self, sample):
736
+ if self._log:
737
+ return 2 ** (self._prob_dict[sample]) if sample in self._prob_dict else 0
738
+ else:
739
+ return self._prob_dict.get(sample, 0)
740
+
741
+ def logprob(self, sample):
742
+ if self._log:
743
+ return self._prob_dict.get(sample, _NINF)
744
+ else:
745
+ if sample not in self._prob_dict:
746
+ return _NINF
747
+ elif self._prob_dict[sample] == 0:
748
+ return _NINF
749
+ else:
750
+ return math.log(self._prob_dict[sample], 2)
751
+
752
+ def max(self):
753
+ if not hasattr(self, "_max"):
754
+ self._max = max((p, v) for (v, p) in self._prob_dict.items())[1]
755
+ return self._max
756
+
757
+ def samples(self):
758
+ return self._prob_dict.keys()
759
+
760
+ def __repr__(self):
761
+ return "<ProbDist with %d samples>" % len(self._prob_dict)
762
+
763
+
764
+ class MLEProbDist(ProbDistI):
765
+ """
766
+ The maximum likelihood estimate for the probability distribution
767
+ of the experiment used to generate a frequency distribution. The
768
+ "maximum likelihood estimate" approximates the probability of
769
+ each sample as the frequency of that sample in the frequency
770
+ distribution.
771
+ """
772
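+ # Illustrative sketch: under MLE the probability of a sample is simply its
+ # relative frequency, e.g. 'a' occurs 2 out of 4 times below.
+ # >>> mle = MLEProbDist(FreqDist('aabb'))
+ # >>> mle.prob('a')
+ # 0.5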
+
773
+ def __init__(self, freqdist, bins=None):
774
+ """
775
+ Use the maximum likelihood estimate to create a probability
776
+ distribution for the experiment used to generate ``freqdist``.
777
+
778
+ :type freqdist: FreqDist
779
+ :param freqdist: The frequency distribution that the
780
+ probability estimates should be based on.
781
+ """
782
+ self._freqdist = freqdist
783
+
784
+ def freqdist(self):
785
+ """
786
+ Return the frequency distribution that this probability
787
+ distribution is based on.
788
+
789
+ :rtype: FreqDist
790
+ """
791
+ return self._freqdist
792
+
793
+ def prob(self, sample):
794
+ return self._freqdist.freq(sample)
795
+
796
+ def max(self):
797
+ return self._freqdist.max()
798
+
799
+ def samples(self):
800
+ return self._freqdist.keys()
801
+
802
+ def __repr__(self):
803
+ """
804
+ :rtype: str
805
+ :return: A string representation of this ``ProbDist``.
806
+ """
807
+ return "<MLEProbDist based on %d samples>" % self._freqdist.N()
808
+
809
+
810
+ class LidstoneProbDist(ProbDistI):
811
+ """
812
+ The Lidstone estimate for the probability distribution of the
813
+ experiment used to generate a frequency distribution. The
814
+ "Lidstone estimate" is parameterized by a real number *gamma*,
815
+ which typically ranges from 0 to 1. The Lidstone estimate
816
+ approximates the probability of a sample with count *c* from an
817
+ experiment with *N* outcomes and *B* bins as
818
+ ``(c+gamma)/(N+B*gamma)``. This is equivalent to adding
819
+ *gamma* to the count for each bin, and taking the maximum
820
+ likelihood estimate of the resulting frequency distribution.
821
+ """
822
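+ # Illustrative sketch: for counts a=2, b=1 (N=3, B=2) and gamma=0.2, the
+ # Lidstone estimate for 'a' is (2 + 0.2) / (3 + 2*0.2) = 2.2 / 3.4, about 0.647.
+ # >>> lid = LidstoneProbDist(FreqDist('aab'), 0.2)
+ # >>> round(lid.prob('a'), 3)
+ # 0.647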
+
823
+ SUM_TO_ONE = False
824
+
825
+ def __init__(self, freqdist, gamma, bins=None):
826
+ """
827
+ Use the Lidstone estimate to create a probability distribution
828
+ for the experiment used to generate ``freqdist``.
829
+
830
+ :type freqdist: FreqDist
831
+ :param freqdist: The frequency distribution that the
832
+ probability estimates should be based on.
833
+ :type gamma: float
834
+ :param gamma: A real number used to parameterize the
835
+ estimate. The Lidstone estimate is equivalent to adding
836
+ *gamma* to the count for each bin, and taking the
837
+ maximum likelihood estimate of the resulting frequency
838
+ distribution.
839
+ :type bins: int
840
+ :param bins: The number of sample values that can be generated
841
+ by the experiment that is described by the probability
842
+ distribution. This value must be correctly set for the
843
+ probabilities of the sample values to sum to one. If
844
+ ``bins`` is not specified, it defaults to ``freqdist.B()``.
845
+ """
846
+ if (bins == 0) or (bins is None and freqdist.N() == 0):
847
+ name = self.__class__.__name__[:-8]
848
+ raise ValueError(
849
+ "A %s probability distribution " % name + "must have at least one bin."
850
+ )
851
+ if (bins is not None) and (bins < freqdist.B()):
852
+ name = self.__class__.__name__[:-8]
853
+ raise ValueError(
854
+ "\nThe number of bins in a %s distribution " % name
855
+ + "(%d) must be greater than or equal to\n" % bins
856
+ + "the number of bins in the FreqDist used "
857
+ + "to create it (%d)." % freqdist.B()
858
+ )
859
+
860
+ self._freqdist = freqdist
861
+ self._gamma = float(gamma)
862
+ self._N = self._freqdist.N()
863
+
864
+ if bins is None:
865
+ bins = freqdist.B()
866
+ self._bins = bins
867
+
868
+ self._divisor = self._N + bins * gamma
869
+ if self._divisor == 0.0:
870
+ # In extreme cases we force the probability to be 0,
871
+ # which it will be, since the count will be 0:
872
+ self._gamma = 0
873
+ self._divisor = 1
874
+
875
+ def freqdist(self):
876
+ """
877
+ Return the frequency distribution that this probability
878
+ distribution is based on.
879
+
880
+ :rtype: FreqDist
881
+ """
882
+ return self._freqdist
883
+
884
+ def prob(self, sample):
885
+ c = self._freqdist[sample]
886
+ return (c + self._gamma) / self._divisor
887
+
888
+ def max(self):
889
+ # For Lidstone distributions, probability is monotonic with
890
+ # frequency, so the most probable sample is the one that
891
+ # occurs most frequently.
892
+ return self._freqdist.max()
893
+
894
+ def samples(self):
895
+ return self._freqdist.keys()
896
+
897
+ def discount(self):
898
+ gb = self._gamma * self._bins
899
+ return gb / (self._N + gb)
900
+
901
+ def __repr__(self):
902
+ """
903
+ Return a string representation of this ``ProbDist``.
904
+
905
+ :rtype: str
906
+ """
907
+ return "<LidstoneProbDist based on %d samples>" % self._freqdist.N()
908
+
909
+
910
+ class LaplaceProbDist(LidstoneProbDist):
911
+ """
912
+ The Laplace estimate for the probability distribution of the
913
+ experiment used to generate a frequency distribution. The
914
+ "Laplace estimate" approximates the probability of a sample with
915
+ count *c* from an experiment with *N* outcomes and *B* bins as
916
+ *(c+1)/(N+B)*. This is equivalent to adding one to the count for
917
+ each bin, and taking the maximum likelihood estimate of the
918
+ resulting frequency distribution.
919
+ """
920
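+ # Illustrative sketch: add-one smoothing of the counts a=2, b=1 gives
+ # p('a') = (2 + 1) / (3 + 2) = 0.6.
+ # >>> LaplaceProbDist(FreqDist('aab')).prob('a')
+ # 0.6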
+
921
+ def __init__(self, freqdist, bins=None):
922
+ """
923
+ Use the Laplace estimate to create a probability distribution
924
+ for the experiment used to generate ``freqdist``.
925
+
926
+ :type freqdist: FreqDist
927
+ :param freqdist: The frequency distribution that the
928
+ probability estimates should be based on.
929
+ :type bins: int
930
+ :param bins: The number of sample values that can be generated
931
+ by the experiment that is described by the probability
932
+ distribution. This value must be correctly set for the
933
+ probabilities of the sample values to sum to one. If
934
+ ``bins`` is not specified, it defaults to ``freqdist.B()``.
935
+ """
936
+ LidstoneProbDist.__init__(self, freqdist, 1, bins)
937
+
938
+ def __repr__(self):
939
+ """
940
+ :rtype: str
941
+ :return: A string representation of this ``ProbDist``.
942
+ """
943
+ return "<LaplaceProbDist based on %d samples>" % self._freqdist.N()
944
+
945
+
946
+ class ELEProbDist(LidstoneProbDist):
947
+ """
948
+ The expected likelihood estimate for the probability distribution
949
+ of the experiment used to generate a frequency distribution. The
950
+ "expected likelihood estimate" approximates the probability of a
951
+ sample with count *c* from an experiment with *N* outcomes and
952
+ *B* bins as *(c+0.5)/(N+B/2)*. This is equivalent to adding 0.5
953
+ to the count for each bin, and taking the maximum likelihood
954
+ estimate of the resulting frequency distribution.
955
+ """
956
+
957
+ def __init__(self, freqdist, bins=None):
958
+ """
959
+ Use the expected likelihood estimate to create a probability
960
+ distribution for the experiment used to generate ``freqdist``.
961
+
962
+ :type freqdist: FreqDist
963
+ :param freqdist: The frequency distribution that the
964
+ probability estimates should be based on.
965
+ :type bins: int
966
+ :param bins: The number of sample values that can be generated
967
+ by the experiment that is described by the probability
968
+ distribution. This value must be correctly set for the
969
+ probabilities of the sample values to sum to one. If
970
+ ``bins`` is not specified, it defaults to ``freqdist.B()``.
971
+ """
972
+ LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
973
+
974
+ def __repr__(self):
975
+ """
976
+ Return a string representation of this ``ProbDist``.
977
+
978
+ :rtype: str
979
+ """
980
+ return "<ELEProbDist based on %d samples>" % self._freqdist.N()
981
+
982
+
983
+ class HeldoutProbDist(ProbDistI):
984
+ """
985
+ The heldout estimate for the probability distribution of the
986
+ experiment used to generate two frequency distributions. These
987
+ two frequency distributions are called the "heldout frequency
988
+ distribution" and the "base frequency distribution." The
989
+ "heldout estimate" uses uses the "heldout frequency
990
+ distribution" to predict the probability of each sample, given its
991
+ frequency in the "base frequency distribution".
992
+
993
+ In particular, the heldout estimate approximates the probability
994
+ for a sample that occurs *r* times in the base distribution as
995
+ the average frequency in the heldout distribution of all samples
996
+ that occur *r* times in the base distribution.
997
+
998
+ This average frequency is *Tr[r]/(Nr[r].N)*, where:
999
+
1000
+ - *Tr[r]* is the total count in the heldout distribution for
1001
+ all samples that occur *r* times in the base distribution.
1002
+ - *Nr[r]* is the number of samples that occur *r* times in
1003
+ the base distribution.
1004
+ - *N* is the number of outcomes recorded by the heldout
1005
+ frequency distribution.
1006
+
1007
+ In order to increase the efficiency of the ``prob`` member
1008
+ function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r*
1009
+ when the ``HeldoutProbDist`` is created.
1010
+
1011
+ :type _estimate: list(float)
1012
+ :ivar _estimate: A list mapping from *r*, the number of
1013
+ times that a sample occurs in the base distribution, to the
1014
+ probability estimate for that sample. ``_estimate[r]`` is
1015
+ calculated by finding the average frequency in the heldout
1016
+ distribution of all samples that occur *r* times in the base
1017
+ distribution. In particular, ``_estimate[r]`` =
1018
+ *Tr[r]/(Nr[r].N)*.
1019
+ :type _max_r: int
1020
+ :ivar _max_r: The maximum number of times that any sample occurs
1021
+ in the base distribution. ``_max_r`` is used to decide how
1022
+ large ``_estimate`` must be.
1023
+ """
1024
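+ # Illustrative worked example, in the notation above: with base = FreqDist('aab')
+ # and heldout = FreqDist('abb'), the sample 'b' occurs r=1 time in the base
+ # distribution; Tr[1] = 2 (count of 'b' in the heldout data), Nr[1] = 1 (only 'b'
+ # occurs once in the base), and N = 3, so any r=1 sample is estimated at
+ # 2 / (1 * 3), about 0.667.
+ # >>> hd = HeldoutProbDist(FreqDist('aab'), FreqDist('abb'))
+ # >>> round(hd.prob('b'), 3)
+ # 0.667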
+
1025
+ SUM_TO_ONE = False
1026
+
1027
+ def __init__(self, base_fdist, heldout_fdist, bins=None):
1028
+ """
1029
+ Use the heldout estimate to create a probability distribution
1030
+ for the experiment used to generate ``base_fdist`` and
1031
+ ``heldout_fdist``.
1032
+
1033
+ :type base_fdist: FreqDist
1034
+ :param base_fdist: The base frequency distribution.
1035
+ :type heldout_fdist: FreqDist
1036
+ :param heldout_fdist: The heldout frequency distribution.
1037
+ :type bins: int
1038
+ :param bins: The number of sample values that can be generated
1039
+ by the experiment that is described by the probability
1040
+ distribution. This value must be correctly set for the
1041
+ probabilities of the sample values to sum to one. If
1042
+ ``bins`` is not specified, it defaults to ``freqdist.B()``.
1043
+ """
1044
+
1045
+ self._base_fdist = base_fdist
1046
+ self._heldout_fdist = heldout_fdist
1047
+
1048
+ # The max number of times any sample occurs in base_fdist.
1049
+ self._max_r = base_fdist[base_fdist.max()]
1050
+
1051
+ # Calculate Tr, Nr, and N.
1052
+ Tr = self._calculate_Tr()
1053
+ r_Nr = base_fdist.r_Nr(bins)
1054
+ Nr = [r_Nr[r] for r in range(self._max_r + 1)]
1055
+ N = heldout_fdist.N()
1056
+
1057
+ # Use Tr, Nr, and N to compute the probability estimate for
1058
+ # each value of r.
1059
+ self._estimate = self._calculate_estimate(Tr, Nr, N)
1060
+
1061
+ def _calculate_Tr(self):
1062
+ """
1063
+ Return the list *Tr*, where *Tr[r]* is the total count in
1064
+ ``heldout_fdist`` for all samples that occur *r*
1065
+ times in ``base_fdist``.
1066
+
1067
+ :rtype: list(float)
1068
+ """
1069
+ Tr = [0.0] * (self._max_r + 1)
1070
+ for sample in self._heldout_fdist:
1071
+ r = self._base_fdist[sample]
1072
+ Tr[r] += self._heldout_fdist[sample]
1073
+ return Tr
1074
+
1075
+ def _calculate_estimate(self, Tr, Nr, N):
1076
+ """
1077
+ Return the list *estimate*, where *estimate[r]* is the probability
1078
+ estimate for any sample that occurs *r* times in the base frequency
1079
+ distribution. In particular, *estimate[r]* is *Tr[r]/(N[r].N)*.
1080
+ In the special case that *N[r]=0*, *estimate[r]* will never be used;
1081
+ so we define *estimate[r]=None* for those cases.
1082
+
1083
+ :rtype: list(float)
1084
+ :type Tr: list(float)
1085
+ :param Tr: the list *Tr*, where *Tr[r]* is the total count in
1086
+ the heldout distribution for all samples that occur *r*
1087
+ times in base distribution.
1088
+ :type Nr: list(float)
1089
+ :param Nr: The list *Nr*, where *Nr[r]* is the number of
1090
+ samples that occur *r* times in the base distribution.
1091
+ :type N: int
1092
+ :param N: The total number of outcomes recorded by the heldout
1093
+ frequency distribution.
1094
+ """
1095
+ estimate = []
1096
+ for r in range(self._max_r + 1):
1097
+ if Nr[r] == 0:
1098
+ estimate.append(None)
1099
+ else:
1100
+ estimate.append(Tr[r] / (Nr[r] * N))
1101
+ return estimate
1102
+
1103
+ def base_fdist(self):
1104
+ """
1105
+ Return the base frequency distribution that this probability
1106
+ distribution is based on.
1107
+
1108
+ :rtype: FreqDist
1109
+ """
1110
+ return self._base_fdist
1111
+
1112
+ def heldout_fdist(self):
1113
+ """
1114
+ Return the heldout frequency distribution that this
1115
+ probability distribution is based on.
1116
+
1117
+ :rtype: FreqDist
1118
+ """
1119
+ return self._heldout_fdist
1120
+
1121
+ def samples(self):
1122
+ return self._base_fdist.keys()
1123
+
1124
+ def prob(self, sample):
1125
+ # Use our precomputed probability estimate.
1126
+ r = self._base_fdist[sample]
1127
+ return self._estimate[r]
1128
+
1129
+ def max(self):
1130
+ # Note: the Heldout estimation is *not* necessarily monotonic;
1131
+ # so this implementation is currently broken. However, it
1132
+ # should give the right answer *most* of the time. :)
1133
+ return self._base_fdist.max()
1134
+
1135
+ def discount(self):
1136
+ raise NotImplementedError()
1137
+
1138
+ def __repr__(self):
1139
+ """
1140
+ :rtype: str
1141
+ :return: A string representation of this ``ProbDist``.
1142
+ """
1143
+ s = "<HeldoutProbDist: %d base samples; %d heldout samples>"
1144
+ return s % (self._base_fdist.N(), self._heldout_fdist.N())
1145
+
1146
+
1147
+ class CrossValidationProbDist(ProbDistI):
1148
+ """
1149
+ The cross-validation estimate for the probability distribution of
1150
+ the experiment used to generate a set of frequency distributions.
1151
+ The "cross-validation estimate" for the probability of a sample
1152
+ is found by averaging the held-out estimates for the sample in
1153
+ each pair of frequency distributions.
1154
+ """
1155
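+ # Illustrative sketch: the estimate averages the heldout estimates over every
+ # ordered pair of the supplied frequency distributions, so it always lies in [0, 1].
+ # >>> folds = [FreqDist('aab'), FreqDist('abb'), FreqDist('abc')]
+ # >>> cv = CrossValidationProbDist(folds, bins=3)
+ # >>> 0.0 <= cv.prob('a') <= 1.0
+ # True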
+
1156
+ SUM_TO_ONE = False
1157
+
1158
+ def __init__(self, freqdists, bins):
1159
+ """
1160
+ Use the cross-validation estimate to create a probability
1161
+ distribution for the experiment used to generate
1162
+ ``freqdists``.
1163
+
1164
+ :type freqdists: list(FreqDist)
1165
+ :param freqdists: A list of the frequency distributions
1166
+ generated by the experiment.
1167
+ :type bins: int
1168
+ :param bins: The number of sample values that can be generated
1169
+ by the experiment that is described by the probability
1170
+ distribution. This value must be correctly set for the
1171
+ probabilities of the sample values to sum to one. If
1172
+ ``bins`` is not specified, it defaults to ``freqdist.B()``.
1173
+ """
1174
+ self._freqdists = freqdists
1175
+
1176
+ # Create a heldout probability distribution for each pair of
1177
+ # frequency distributions in freqdists.
1178
+ self._heldout_probdists = []
1179
+ for fdist1 in freqdists:
1180
+ for fdist2 in freqdists:
1181
+ if fdist1 is not fdist2:
1182
+ probdist = HeldoutProbDist(fdist1, fdist2, bins)
1183
+ self._heldout_probdists.append(probdist)
1184
+
1185
+ def freqdists(self):
1186
+ """
1187
+ Return the list of frequency distributions that this ``ProbDist`` is based on.
1188
+
1189
+ :rtype: list(FreqDist)
1190
+ """
1191
+ return self._freqdists
1192
+
1193
+ def samples(self):
1194
+ # [xx] nb: this is not too efficient
1195
+ return set(sum((list(fd) for fd in self._freqdists), []))
1196
+
1197
+ def prob(self, sample):
1198
+ # Find the average probability estimate returned by each
1199
+ # heldout distribution.
1200
+ prob = 0.0
1201
+ for heldout_probdist in self._heldout_probdists:
1202
+ prob += heldout_probdist.prob(sample)
1203
+ return prob / len(self._heldout_probdists)
1204
+
1205
+ def discount(self):
1206
+ raise NotImplementedError()
1207
+
1208
+ def __repr__(self):
1209
+ """
1210
+ Return a string representation of this ``ProbDist``.
1211
+
1212
+ :rtype: str
1213
+ """
1214
+ return "<CrossValidationProbDist: %d-way>" % len(self._freqdists)
1215
+
1216
+
1217
+ class WittenBellProbDist(ProbDistI):
1218
+ """
1219
+ The Witten-Bell estimate of a probability distribution. This distribution
1220
+ allocates uniform probability mass to as yet unseen events by using the
1221
+ number of events that have only been seen once. The probability mass
1222
+ reserved for unseen events is equal to *T / (N + T)*
1223
+ where *T* is the number of observed event types and *N* is the total
1224
+ number of observed events. This equates to the maximum likelihood estimate
1225
+ of a new type event occurring. The remaining probability mass is discounted
1226
+ such that all probability estimates sum to one, yielding:
1227
+
1228
+ - *p = T / Z (N + T)*, if count = 0
1229
+ - *p = c / (N + T)*, otherwise
1230
+ """
1231
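+ # Illustrative worked example: for FreqDist('aaab') there are N = 4 events and
+ # T = 2 types, so a seen sample such as 'a' gets c / (N + T) = 3 / 6 = 0.5, and
+ # the reserved mass T / (N + T) = 1/3 is split among the Z = bins - T unseen
+ # types (the sample name 'never_seen' below is arbitrary).
+ # >>> wb = WittenBellProbDist(FreqDist('aaab'), bins=4)
+ # >>> wb.prob('a'), round(wb.prob('never_seen'), 4)
+ # (0.5, 0.1667)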
+
1232
+ def __init__(self, freqdist, bins=None):
1233
+ """
1234
+ Creates a distribution of Witten-Bell probability estimates. This
1235
+ distribution allocates uniform probability mass to as yet unseen
1236
+ events by using the number of events that have only been seen once. The
1237
+ probability mass reserved for unseen events is equal to *T / (N + T)*
1238
+ where *T* is the number of observed event types and *N* is the total
1239
+ number of observed events. This equates to the maximum likelihood
1240
+ estimate of a new type event occurring. The remaining probability mass
1241
+ is discounted such that all probability estimates sum to one,
1242
+ yielding:
1243
+
1244
+ - *p = T / Z (N + T)*, if count = 0
1245
+ - *p = c / (N + T)*, otherwise
1246
+
1247
+ The parameters *T* and *N* are taken from the ``freqdist`` parameter
1248
+ (the ``B()`` and ``N()`` values). The normalizing factor *Z* is
1249
+ calculated using these values along with the ``bins`` parameter.
1250
+
1251
+ :param freqdist: The frequency counts upon which to base the
1252
+ estimation.
1253
+ :type freqdist: FreqDist
1254
+ :param bins: The number of possible event types. This must be at least
1255
+ as large as the number of bins in the ``freqdist``. If None, then
1256
+ it's assumed to be equal to that of the ``freqdist``
1257
+ :type bins: int
1258
+ """
1259
+ assert bins is None or bins >= freqdist.B(), (
1260
+ "bins parameter must not be less than %d=freqdist.B()" % freqdist.B()
1261
+ )
1262
+ if bins is None:
1263
+ bins = freqdist.B()
1264
+ self._freqdist = freqdist
1265
+ self._T = self._freqdist.B()
1266
+ self._Z = bins - self._freqdist.B()
1267
+ self._N = self._freqdist.N()
1268
+ # self._P0 is P(0), precalculated for efficiency:
1269
+ if self._N == 0:
1270
+ # if freqdist is empty, we approximate P(0) by a UniformProbDist:
1271
+ self._P0 = 1.0 / self._Z
1272
+ else:
1273
+ self._P0 = self._T / (self._Z * (self._N + self._T))
1274
+
1275
+ def prob(self, sample):
1276
+ # inherit docs from ProbDistI
1277
+ c = self._freqdist[sample]
1278
+ return c / (self._N + self._T) if c != 0 else self._P0
1279
+
1280
+ def max(self):
1281
+ return self._freqdist.max()
1282
+
1283
+ def samples(self):
1284
+ return self._freqdist.keys()
1285
+
1286
+ def freqdist(self):
1287
+ return self._freqdist
1288
+
1289
+ def discount(self):
1290
+ raise NotImplementedError()
1291
+
1292
+ def __repr__(self):
1293
+ """
1294
+ Return a string representation of this ``ProbDist``.
1295
+
1296
+ :rtype: str
1297
+ """
1298
+ return "<WittenBellProbDist based on %d samples>" % self._freqdist.N()
1299
+
1300
+
1301
+ ##//////////////////////////////////////////////////////
1302
+ ## Good-Turing Probability Distributions
1303
+ ##//////////////////////////////////////////////////////
1304
+
1305
+ # Good-Turing frequency estimation was contributed by Alan Turing and
1306
+ # his statistical assistant I.J. Good, during their collaboration in
1307
+ # WWII. It is a statistical technique for predicting the
1308
+ # probability of occurrence of objects belonging to an unknown number
1309
+ # of species, given past observations of such objects and their
1310
+ # species. In drawing balls from an urn, the 'objects' would be balls
1311
+ # and the 'species' would be the distinct colors of the balls (finite
1312
+ # but unknown in number).
1313
+ #
1314
+ # The Good-Turing method calculates the probability mass to assign to
1315
+ # events with zero or low counts based on the number of events with
1316
+ # higher counts. It does so by using the adjusted count *c\**:
1317
+ #
1318
+ # - *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
1319
+ # - *things with frequency zero in training* = N(1) for c == 0
1320
+ #
1321
+ # where *c* is the original count, *N(i)* is the number of event types
1322
+ # observed with count *i*. The count of unseen events can be thought of as the count
1323
+ # of frequency one (see Jurafsky & Martin 2nd Edition, p101).
1324
+ #
1325
+ # This method is problematic because the situation ``N(c+1) == 0``
1326
+ # is quite common in the original Good-Turing estimation; smoothing or
1327
+ # interpolation of *N(i)* values is essential in practice.
1328
+ #
1329
+ # Bill Gale and Geoffrey Sampson present a simple and effective approach,
1330
+ # Simple Good-Turing. As a smoothing curve they simply use a power curve:
1331
+ #
1332
+ # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
1333
+ # relationship)
1334
+ #
1335
+ # They estimate a and b by simple linear regression technique on the
1336
+ # logarithmic form of the equation:
1337
+ #
1338
+ # log Nr = a + b*log(r)
1339
+ #
1340
+ # However, they suggest that such a simple curve is probably only
1341
+ # appropriate for high values of r. For low values of r, they use the
1342
+ # measured Nr directly. (see M&S, p.213)
1343
+ #
1344
+ # Gale and Sampson propose to use r while the difference between r and
1345
+ # r* is more than 1.96 times the standard deviation, and to switch to r* if
1346
+ # it is less than or equal to that:
1347
+ #
1348
+ # |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
1349
+ #
1350
+ # The 1.96 coefficient corresponds to a 0.05 significance criterion;
1351
+ # some implementations can use a coefficient of 1.65 for a 0.1
1352
+ # significance criterion.
1353
+ #
1354
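+ # Worked illustration of the adjusted count: if N(1) = 3 items were seen once and
+ # N(2) = 1 item was seen twice, a once-seen item gets the adjusted count
+ # c* = (1 + 1) * N(2) / N(1) = 2/3, shifting part of its mass towards the unseen
+ # events, whose total mass is taken to be N(1)/N.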
+
1355
+ ##//////////////////////////////////////////////////////
1356
+ ## Simple Good-Turing Probability Distributions
1357
+ ##//////////////////////////////////////////////////////
1358
+
1359
+
1360
+ class SimpleGoodTuringProbDist(ProbDistI):
1361
+ """
1362
+ SimpleGoodTuring ProbDist approximates the relationship between frequency and
1363
+ frequency of frequency by a straight line in log space, fitted by linear regression.
1364
+ Details of Simple Good-Turing algorithm can be found in:
1365
+
1366
+ - "Good Turing smoothing without tears" (Gale & Sampson 1995),
1367
+ Journal of Quantitative Linguistics, vol. 2 pp. 217-237.
1368
+ - "Speech and Language Processing (Jurafsky & Martin),
1369
+ 2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
1370
+ - https://www.grsampson.net/RGoodTur.html
1371
+
1372
+ Given a set of pair (xi, yi), where the xi denotes the frequency and
1373
+ yi denotes the frequency of frequency, we want to minimize their
1374
+ square variation. E(x) and E(y) represent the mean of xi and yi.
1375
+
1376
+ - slope: b = sigma ((xi-E(x))(yi-E(y))) / sigma ((xi-E(x))(xi-E(x)))
1377
+ - intercept: a = E(y) - b.E(x)
1378
+ """
1379
+
1380
+ SUM_TO_ONE = False
1381
+
1382
+ def __init__(self, freqdist, bins=None):
1383
+ """
1384
+ :param freqdist: The frequency counts upon which to base the
1385
+ estimation.
1386
+ :type freqdist: FreqDist
1387
+ :param bins: The number of possible event types. This must be
1388
+ larger than the number of bins in the ``freqdist``. If None,
1389
+ then it's assumed to be equal to ``freqdist``.B() + 1
1390
+ :type bins: int
1391
+ """
1392
+ assert (
1393
+ bins is None or bins > freqdist.B()
1394
+ ), "bins parameter must not be less than %d=freqdist.B()+1" % (freqdist.B() + 1)
1395
+ if bins is None:
1396
+ bins = freqdist.B() + 1
1397
+ self._freqdist = freqdist
1398
+ self._bins = bins
1399
+ r, nr = self._r_Nr()
1400
+ self.find_best_fit(r, nr)
1401
+ self._switch(r, nr)
1402
+ self._renormalize(r, nr)
1403
+
1404
+ def _r_Nr_non_zero(self):
1405
+ r_Nr = self._freqdist.r_Nr()
1406
+ del r_Nr[0]
1407
+ return r_Nr
1408
+
1409
+ def _r_Nr(self):
1410
+ """
1411
+ Split the frequency distribution into two lists (r, Nr), where Nr(r) > 0
1412
+ """
1413
+ nonzero = self._r_Nr_non_zero()
1414
+
1415
+ if not nonzero:
1416
+ return [], []
1417
+ return zip(*sorted(nonzero.items()))
1418
+
1419
+ def find_best_fit(self, r, nr):
1420
+ """
1421
+ Use simple linear regression to tune parameters self._slope and
1422
+ self._intercept in the log-log space based on count and Nr(count)
1423
+ (Work in log space to avoid floating point underflow.)
1424
+ """
1425
+ # For higher sample frequencies the data points become horizontal
1427
+ # along the line Nr=1. To create a more evident linear model in log-log
1427
+ # space, we average positive Nr values with the surrounding zero
1428
+ # values. (Church and Gale, 1991)
1429
+
1430
+ if not r or not nr:
1431
+ # Empty r or nr?
1432
+ return
1433
+
1434
+ zr = []
1435
+ for j in range(len(r)):
1436
+ i = r[j - 1] if j > 0 else 0
1437
+ k = 2 * r[j] - i if j == len(r) - 1 else r[j + 1]
1438
+ zr_ = 2.0 * nr[j] / (k - i)
1439
+ zr.append(zr_)
1440
+
1441
+ log_r = [math.log(i) for i in r]
1442
+ log_zr = [math.log(i) for i in zr]
1443
+
1444
+ xy_cov = x_var = 0.0
1445
+ x_mean = sum(log_r) / len(log_r)
1446
+ y_mean = sum(log_zr) / len(log_zr)
1447
+ for (x, y) in zip(log_r, log_zr):
1448
+ xy_cov += (x - x_mean) * (y - y_mean)
1449
+ x_var += (x - x_mean) ** 2
1450
+ self._slope = xy_cov / x_var if x_var != 0 else 0.0
1451
+ if self._slope >= -1:
1452
+ warnings.warn(
1453
+ "SimpleGoodTuring did not find a proper best fit "
1454
+ "line for smoothing probabilities of occurrences. "
1455
+ "The probability estimates are likely to be "
1456
+ "unreliable."
1457
+ )
1458
+ self._intercept = y_mean - self._slope * x_mean
1459
+
1460
+ def _switch(self, r, nr):
1461
+ """
1462
+ Calculate the r frontier where we must switch from Nr to Sr
1463
+ when estimating E[Nr].
1464
+ """
1465
+ for i, r_ in enumerate(r):
1466
+ if len(r) == i + 1 or r[i + 1] != r_ + 1:
1467
+ # We are at the end of r, or there is a gap in r
1468
+ self._switch_at = r_
1469
+ break
1470
+
1471
+ Sr = self.smoothedNr
1472
+ smooth_r_star = (r_ + 1) * Sr(r_ + 1) / Sr(r_)
1473
+ unsmooth_r_star = (r_ + 1) * nr[i + 1] / nr[i]
1474
+
1475
+ std = math.sqrt(self._variance(r_, nr[i], nr[i + 1]))
1476
+ if abs(unsmooth_r_star - smooth_r_star) <= 1.96 * std:
1477
+ self._switch_at = r_
1478
+ break
1479
+
1480
+ def _variance(self, r, nr, nr_1):
1481
+ r = float(r)
1482
+ nr = float(nr)
1483
+ nr_1 = float(nr_1)
1484
+ return (r + 1.0) ** 2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr)
1485
+
1486
+ def _renormalize(self, r, nr):
1487
+ """
1488
+ It is necessary to renormalize all the probability estimates to
1489
+ ensure a proper probability distribution results. This can be done
1490
+ by keeping the estimate of the probability mass for unseen items as
1491
+ N(1)/N and renormalizing all the estimates for previously seen items
1492
+ (as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
1493
+ """
1494
+ prob_cov = 0.0
1495
+ for r_, nr_ in zip(r, nr):
1496
+ prob_cov += nr_ * self._prob_measure(r_)
1497
+ if prob_cov:
1498
+ self._renormal = (1 - self._prob_measure(0)) / prob_cov
1499
+
1500
+ def smoothedNr(self, r):
1501
+ """
1502
+ Return the smoothed estimate of the number of samples with count r.
1503
+
1504
+ :param r: The count (sample frequency) whose smoothed Nr is wanted.
1505
+ :type r: int
1506
+ :rtype: float
1507
+ """
1508
+
1509
+ # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
1510
+ # relationship)
1511
+ # Estimate a and b by simple linear regression technique on
1512
+ # the logarithmic form of the equation: log Nr = a + b*log(r)
1513
+
1514
+ return math.exp(self._intercept + self._slope * math.log(r))
1515
+
1516
+ def prob(self, sample):
1517
+ """
1518
+ Return the sample's probability.
1519
+
1520
+ :param sample: sample of the event
1521
+ :type sample: str
1522
+ :rtype: float
1523
+ """
1524
+ count = self._freqdist[sample]
1525
+ p = self._prob_measure(count)
1526
+ if count == 0:
1527
+ if self._bins == self._freqdist.B():
1528
+ p = 0.0
1529
+ else:
1530
+ p = p / (self._bins - self._freqdist.B())
1531
+ else:
1532
+ p = p * self._renormal
1533
+ return p
1534
+
1535
+ def _prob_measure(self, count):
1536
+ if count == 0 and self._freqdist.N() == 0:
1537
+ return 1.0
1538
+ elif count == 0 and self._freqdist.N() != 0:
1539
+ return self._freqdist.Nr(1) / self._freqdist.N()
1540
+
1541
+ if self._switch_at > count:
1542
+ Er_1 = self._freqdist.Nr(count + 1)
1543
+ Er = self._freqdist.Nr(count)
1544
+ else:
1545
+ Er_1 = self.smoothedNr(count + 1)
1546
+ Er = self.smoothedNr(count)
1547
+
1548
+ r_star = (count + 1) * Er_1 / Er
1549
+ return r_star / self._freqdist.N()
1550
+
1551
+ def check(self):
1552
+ prob_sum = 0.0
1553
+ for r_, nr_ in zip(*self._r_Nr()):  # self._Nr is never stored; recompute the (r, Nr) pairs
1554
+ prob_sum += nr_ * self._prob_measure(r_) / self._renormal
1555
+ print("Probability Sum:", prob_sum)
1556
+ # assert prob_sum == 1.0, "probability sum should be one!"
1557
+
1558
+ def discount(self):
1559
+ """
1560
+ This function returns the total mass of probability transfers from the
1561
+ seen samples to the unseen samples.
1562
+ """
1563
+ return self.smoothedNr(1) / self._freqdist.N()
1564
+
1565
+ def max(self):
1566
+ return self._freqdist.max()
1567
+
1568
+ def samples(self):
1569
+ return self._freqdist.keys()
1570
+
1571
+ def freqdist(self):
1572
+ return self._freqdist
1573
+
1574
+ def __repr__(self):
1575
+ """
1576
+ Return a string representation of this ``ProbDist``.
1577
+
1578
+ :rtype: str
1579
+ """
1580
+ return "<SimpleGoodTuringProbDist based on %d samples>" % self._freqdist.N()
1581
+
1582
+
1583
+ class MutableProbDist(ProbDistI):
1584
+ """
1585
+ A mutable probdist where the probabilities may be easily modified. This
1586
+ simply copies an existing probdist, storing the probability values in a
1587
+ mutable dictionary and providing an update method.
1588
+ """
1589
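+ # Illustrative sketch: copy a uniform distribution, then overwrite the stored
+ # probabilities (they are kept as base-2 logs by default).
+ # >>> mpd = MutableProbDist(UniformProbDist(['a', 'b']), ['a', 'b'])
+ # >>> mpd.update('a', 0.75, log=False)
+ # >>> mpd.update('b', 0.25, log=False)
+ # >>> round(mpd.prob('a'), 2)
+ # 0.75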
+
1590
+ def __init__(self, prob_dist, samples, store_logs=True):
1591
+ """
1592
+ Creates the mutable probdist based on the given prob_dist and using
1593
+ the list of samples given. These values are stored as log
1594
+ probabilities if the store_logs flag is set.
1595
+
1596
+ :param prob_dist: the distribution from which to garner the
1597
+ probabilities
1598
+ :type prob_dist: ProbDist
1599
+ :param samples: the complete set of samples
1600
+ :type samples: sequence of any
1601
+ :param store_logs: whether to store the probabilities as logarithms
1602
+ :type store_logs: bool
1603
+ """
1604
+ self._samples = samples
1605
+ self._sample_dict = {samples[i]: i for i in range(len(samples))}
1606
+ self._data = array.array("d", [0.0]) * len(samples)
1607
+ for i in range(len(samples)):
1608
+ if store_logs:
1609
+ self._data[i] = prob_dist.logprob(samples[i])
1610
+ else:
1611
+ self._data[i] = prob_dist.prob(samples[i])
1612
+ self._logs = store_logs
1613
+
1614
+ def max(self):
1615
+ # inherit documentation
1616
+ return max((p, v) for (v, p) in self._sample_dict.items())[1]
1617
+
1618
+ def samples(self):
1619
+ # inherit documentation
1620
+ return self._samples
1621
+
1622
+ def prob(self, sample):
1623
+ # inherit documentation
1624
+ i = self._sample_dict.get(sample)
1625
+ if i is None:
1626
+ return 0.0
1627
+ return 2 ** (self._data[i]) if self._logs else self._data[i]
1628
+
1629
+ def logprob(self, sample):
1630
+ # inherit documentation
1631
+ i = self._sample_dict.get(sample)
1632
+ if i is None:
1633
+ return float("-inf")
1634
+ return self._data[i] if self._logs else math.log(self._data[i], 2)
1635
+
1636
+ def update(self, sample, prob, log=True):
1637
+ """
1638
+ Update the probability for the given sample. This may cause the object
1639
+ to stop being a valid probability distribution - the user must
1640
+ ensure that they update the sample probabilities such that all samples
1641
+ have probabilities between 0 and 1 and that all probabilities sum to
1642
+ one.
1643
+
1644
+ :param sample: the sample for which to update the probability
1645
+ :type sample: any
1646
+ :param prob: the new probability
1647
+ :type prob: float
1648
+ :param log: is the probability already logged
1649
+ :type log: bool
1650
+ """
1651
+ i = self._sample_dict.get(sample)
1652
+ assert i is not None
1653
+ if self._logs:
1654
+ self._data[i] = prob if log else math.log(prob, 2)
1655
+ else:
1656
+ self._data[i] = 2 ** (prob) if log else prob
1657
+
1658
+
1659
+ ##/////////////////////////////////////////////////////
1660
+ ## Kneser-Ney Probability Distribution
1661
+ ##//////////////////////////////////////////////////////
1662
+
1663
+ # This method for calculating probabilities was introduced in 1995 by Reinhard
1664
+ # Kneser and Hermann Ney. It was meant to improve the accuracy of language
1665
+ # models that use backing-off to deal with sparse data. The authors propose two
1666
+ # ways of doing so: a marginal distribution constraint on the back-off
1667
+ # distribution and a leave-one-out distribution. For a start, the first one is
1668
+ # implemented as a class below.
1669
+ #
1670
+ # The idea behind a back-off n-gram model is that we have a series of
1671
+ # frequency distributions for our n-grams so that in case we have not seen a
1672
+ # given n-gram during training (and as a result have a 0 probability for it) we
1673
+ # can 'back off' (hence the name!) and try testing whether we've seen the
1674
+ # n-1-gram part of the n-gram in training.
1675
+ #
1676
+ # The novelty of Kneser and Ney's approach was that they decided to fiddle
1677
+ # around with the way this latter, backed off probability was being calculated
1678
+ # whereas their peers seemed to focus on the primary probability.
1679
+ #
1680
+ # The implementation below uses one of the techniques described in their paper
1681
+ # titled "Improved backing-off for n-gram language modeling." In the same paper
1682
+ # another technique is introduced to attempt to smooth the back-off
1683
+ # distribution as well as the primary one. There is also a much-cited
1684
+ # modification of this method proposed by Chen and Goodman.
1685
+ #
1686
+ # In order for the implementation of Kneser-Ney to be more efficient, some
1687
+ # changes have been made to the original algorithm. Namely, the calculation of
1688
+ # the normalizing function gamma has been significantly simplified and
1689
+ # combined slightly differently with beta. None of these changes affect the
1690
+ # nature of the algorithm, but instead aim to cut out unnecessary calculations
1691
+ # and take advantage of storing and retrieving information in dictionaries
1692
+ # where possible.
1693
+
1694
+
1695
+ class KneserNeyProbDist(ProbDistI):
1696
+ """
1697
+ Kneser-Ney estimate of a probability distribution. This is a version of
1698
+ back-off that counts how likely an n-gram is provided the n-1-gram had
1699
+ been seen in training. Extends the ProbDistI interface, requires a trigram
1700
+ FreqDist instance to train on. Optionally, a discount value different from the
1701
+ default can be specified. The default discount is set to 0.75.
1702
+
1703
+ """
1704
+
1705
+ def __init__(self, freqdist, bins=None, discount=0.75):
1706
+ """
1707
+ :param freqdist: The trigram frequency distribution upon which to base
1708
+ the estimation
1709
+ :type freqdist: FreqDist
1710
+ :param bins: Included for compatibility with nltk.tag.hmm
1711
+ :type bins: int or float
1712
+ :param discount: The discount applied when retrieving counts of
1713
+ trigrams
1714
+ :type discount: float (preferred, but can be set to int)
1715
+ """
1716
+
1717
+ if not bins:
1718
+ self._bins = freqdist.B()
1719
+ else:
1720
+ self._bins = bins
1721
+ self._D = discount
1722
+
1723
+ # cache for probability calculation
1724
+ self._cache = {}
1725
+
1726
+ # internal bigram and trigram frequency distributions
1727
+ self._bigrams = defaultdict(int)
1728
+ self._trigrams = freqdist
1729
+
1730
+ # helper dictionaries used to calculate probabilities
1731
+ self._wordtypes_after = defaultdict(float)
1732
+ self._trigrams_contain = defaultdict(float)
1733
+ self._wordtypes_before = defaultdict(float)
1734
+ for w0, w1, w2 in freqdist:
1735
+ self._bigrams[(w0, w1)] += freqdist[(w0, w1, w2)]
1736
+ self._wordtypes_after[(w0, w1)] += 1
1737
+ self._trigrams_contain[w1] += 1
1738
+ self._wordtypes_before[(w1, w2)] += 1
1739
+
1740
+ def prob(self, trigram):
1741
+ # sample must be a triple
1742
+ if len(trigram) != 3:
1743
+ raise ValueError("Expected an iterable with 3 members.")
1744
+ trigram = tuple(trigram)
1745
+ w0, w1, w2 = trigram
1746
+
1747
+ if trigram in self._cache:
1748
+ return self._cache[trigram]
1749
+ else:
1750
+ # if the sample trigram was seen during training
1751
+ if trigram in self._trigrams:
1752
+ prob = (self._trigrams[trigram] - self.discount()) / self._bigrams[
1753
+ (w0, w1)
1754
+ ]
1755
+
1756
+ # else if the 'rougher' environment was seen during training
1757
+ elif (w0, w1) in self._bigrams and (w1, w2) in self._wordtypes_before:
1758
+ aftr = self._wordtypes_after[(w0, w1)]
1759
+ bfr = self._wordtypes_before[(w1, w2)]
1760
+
1761
+ # the probability left over from alphas
1762
+ leftover_prob = (aftr * self.discount()) / self._bigrams[(w0, w1)]
1763
+
1764
+ # the beta (including normalization)
1765
+ beta = bfr / (self._trigrams_contain[w1] - aftr)
1766
+
1767
+ prob = leftover_prob * beta
1768
+
1769
+ # else the sample was completely unseen during training
1770
+ else:
1771
+ prob = 0.0
1772
+
1773
+ self._cache[trigram] = prob
1774
+ return prob
1775
+
1776
+ def discount(self):
1777
+ """
1778
+ Return the value by which counts are discounted. By default set to 0.75.
1779
+
1780
+ :rtype: float
1781
+ """
1782
+ return self._D
1783
+
1784
+ def set_discount(self, discount):
1785
+ """
1786
+ Set the value by which counts are discounted to the value of discount.
1787
+
1788
+ :param discount: the new value to discount counts by
1789
+ :type discount: float (preferred, but int possible)
1790
+ :rtype: None
1791
+ """
1792
+ self._D = discount
1793
+
1794
+ def samples(self):
1795
+ return self._trigrams.keys()
1796
+
1797
+ def max(self):
1798
+ return self._trigrams.max()
1799
+
1800
+ def __repr__(self):
1801
+ """
1802
+ Return a string representation of this ProbDist
1803
+
1804
+ :rtype: str
1805
+ """
1806
+ return f"<KneserNeyProbDist based on {self._trigrams.N()} trigrams"
1807
+
1808
+
1809
+ ##//////////////////////////////////////////////////////
1810
+ ## Probability Distribution Operations
1811
+ ##//////////////////////////////////////////////////////
1812
+
1813
+
1814
+ def log_likelihood(test_pdist, actual_pdist):
1815
+ if not isinstance(test_pdist, ProbDistI) or not isinstance(actual_pdist, ProbDistI):
1816
+ raise ValueError("expected a ProbDist.")
1817
+ # Is this right?
1818
+ return sum(
1819
+ actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2) for s in actual_pdist
1820
+ )
1821
+
1822
+
1823
+ def entropy(pdist):
1824
+ probs = (pdist.prob(s) for s in pdist.samples())
1825
+ return -sum(p * math.log(p, 2) for p in probs)
1826
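+ # Illustrative sketch: the entropy of a uniform distribution over two samples is
+ # exactly one bit.
+ # >>> entropy(UniformProbDist(['a', 'b']))
+ # 1.0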
+
1827
+
1828
+ ##//////////////////////////////////////////////////////
1829
+ ## Conditional Distributions
1830
+ ##//////////////////////////////////////////////////////
1831
+
1832
+
1833
+ class ConditionalFreqDist(defaultdict):
1834
+ """
1835
+ A collection of frequency distributions for a single experiment
1836
+ run under different conditions. Conditional frequency
1837
+ distributions are used to record the number of times each sample
1838
+ occurred, given the condition under which the experiment was run.
1839
+ For example, a conditional frequency distribution could be used to
1840
+ record the frequency of each word (type) in a document, given its
1841
+ length. Formally, a conditional frequency distribution can be
1842
+ defined as a function that maps from each condition to the
1843
+ FreqDist for the experiment under that condition.
1844
+
1845
+ Conditional frequency distributions are typically constructed by
1846
+ repeatedly running an experiment under a variety of conditions,
1847
+ and incrementing the sample outcome counts for the appropriate
1848
+ conditions. For example, the following code will produce a
1849
+ conditional frequency distribution that encodes how often each
1850
+ word type occurs, given the length of that word type:
1851
+
1852
+ >>> from nltk.probability import ConditionalFreqDist
1853
+ >>> from nltk.tokenize import word_tokenize
1854
+ >>> sent = "the the the dog dog some other words that we do not care about"
1855
+ >>> cfdist = ConditionalFreqDist()
1856
+ >>> for word in word_tokenize(sent):
1857
+ ... condition = len(word)
1858
+ ... cfdist[condition][word] += 1
1859
+
1860
+ An equivalent way to do this is with the initializer:
1861
+
1862
+ >>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))
1863
+
1864
+ The frequency distribution for each condition is accessed using
1865
+ the indexing operator:
1866
+
1867
+ >>> cfdist[3]
1868
+ FreqDist({'the': 3, 'dog': 2, 'not': 1})
1869
+ >>> cfdist[3].freq('the')
1870
+ 0.5
1871
+ >>> cfdist[3]['dog']
1872
+ 2
1873
+
1874
+ When the indexing operator is used to access the frequency
1875
+ distribution for a condition that has not been accessed before,
1876
+ ``ConditionalFreqDist`` creates a new empty FreqDist for that
1877
+ condition.
1878
+
1879
+ """
1880
+
1881
+ def __init__(self, cond_samples=None):
1882
+ """
1883
+ Construct a new empty conditional frequency distribution. In
1884
+ particular, the count for every sample, under every condition,
1885
+ is zero.
1886
+
1887
+ :param cond_samples: The samples to initialize the conditional
1888
+ frequency distribution with
1889
+ :type cond_samples: Sequence of (condition, sample) tuples
1890
+ """
1891
+ defaultdict.__init__(self, FreqDist)
1892
+
1893
+ if cond_samples:
1894
+ for (cond, sample) in cond_samples:
1895
+ self[cond][sample] += 1
1896
+
1897
+ def __reduce__(self):
1898
+ kv_pairs = ((cond, self[cond]) for cond in self.conditions())
1899
+ return (self.__class__, (), None, None, kv_pairs)
1900
+
1901
+ def conditions(self):
1902
+ """
1903
+ Return a list of the conditions that have been accessed for
1904
+ this ``ConditionalFreqDist``. Use the indexing operator to
1905
+ access the frequency distribution for a given condition.
1906
+ Note that the frequency distributions for some conditions
1907
+ may contain zero sample outcomes.
1908
+
1909
+ :rtype: list
1910
+ """
1911
+ return list(self.keys())
1912
+
1913
+ def N(self):
1914
+ """
1915
+ Return the total number of sample outcomes that have been
1916
+ recorded by this ``ConditionalFreqDist``.
1917
+
1918
+ :rtype: int
1919
+ """
1920
+ return sum(fdist.N() for fdist in self.values())
1921
+
1922
+ def plot(
1923
+ self,
1924
+ *args,
1925
+ samples=None,
1926
+ title="",
1927
+ cumulative=False,
1928
+ percents=False,
1929
+ conditions=None,
1930
+ show=True,
1931
+ **kwargs,
1932
+ ):
1933
+ """
1934
+ Plot the given samples from the conditional frequency distribution.
1935
+ For a cumulative plot, specify cumulative=True. Additional ``*args`` and
1936
+ ``**kwargs`` are passed to matplotlib's plot function.
1937
+ (Requires Matplotlib to be installed.)
1938
+
1939
+ :param samples: The samples to plot
1940
+ :type samples: list
1941
+ :param title: The title for the graph
1942
+ :type title: str
1943
+ :param cumulative: Whether the plot is cumulative. (default = False)
1944
+ :type cumulative: bool
1945
+ :param percents: Whether the plot uses percents instead of counts. (default = False)
1946
+ :type percents: bool
1947
+ :param conditions: The conditions to plot (default is all)
1948
+ :type conditions: list
1949
+ :param show: Whether to show the plot, or only return the ax.
1950
+ :type show: bool
1951
+ """
1952
+ try:
1953
+ import matplotlib.pyplot as plt
1954
+ except ImportError as e:
1955
+ raise ValueError(
1956
+ "The plot function requires matplotlib to be installed."
1957
+ "See https://matplotlib.org/"
1958
+ ) from e
1959
+
1960
+ if not conditions:
1961
+ conditions = self.conditions()
1962
+ else:
1963
+ conditions = [c for c in conditions if c in self]
1964
+ if not samples:
1965
+ samples = sorted({v for c in conditions for v in self[c]})
1966
+ if "linewidth" not in kwargs:
1967
+ kwargs["linewidth"] = 2
1968
+ ax = plt.gca()
1969
+ if conditions:
1970
+ freqs = []
1971
+ for condition in conditions:
1972
+ if cumulative:
1973
+ # freqs should be a list of lists, where each sub-list holds the frequencies for one condition
1974
+ freq = list(self[condition]._cumulative_frequencies(samples))
1975
+ else:
1976
+ freq = [self[condition][sample] for sample in samples]
1977
+
1978
+ if percents:
1979
+ freq = [f / self[condition].N() * 100 for f in freq]
1980
+
1981
+ freqs.append(freq)
1982
+
1983
+ if cumulative:
1984
+ ylabel = "Cumulative "
1985
+ legend_loc = "lower right"
1986
+ else:
1987
+ ylabel = ""
1988
+ legend_loc = "upper right"
1989
+
1990
+ if percents:
1991
+ ylabel += "Percents"
1992
+ else:
1993
+ ylabel += "Counts"
1994
+
1995
+ i = 0
1996
+ for freq in freqs:
1997
+ kwargs["label"] = conditions[i] # label for each condition
1998
+ i += 1
1999
+ ax.plot(freq, *args, **kwargs)
2000
+ ax.legend(loc=legend_loc)
2001
+ ax.grid(True, color="silver")
2002
+ ax.set_xticks(range(len(samples)))
2003
+ ax.set_xticklabels([str(s) for s in samples], rotation=90)
2004
+ if title:
2005
+ ax.set_title(title)
2006
+ ax.set_xlabel("Samples")
2007
+ ax.set_ylabel(ylabel)
2008
+
2009
+ if show:
2010
+ plt.show()
2011
+
2012
+ return ax
2013
+
2014
+ def tabulate(self, *args, **kwargs):
2015
+ """
2016
+ Tabulate the given samples from the conditional frequency distribution.
2017
+
2018
+ :param samples: The samples to tabulate
2019
+ :type samples: list
2020
+ :param conditions: The conditions to tabulate (default is all)
2021
+ :type conditions: list
2022
+ :param cumulative: A flag to specify whether the freqs are cumulative (default = False)
2023
+ :type cumulative: bool
2024
+ """
2025
+
2026
+ cumulative = _get_kwarg(kwargs, "cumulative", False)
2027
+ conditions = _get_kwarg(kwargs, "conditions", sorted(self.conditions()))
2028
+ samples = _get_kwarg(
2029
+ kwargs,
2030
+ "samples",
2031
+ sorted({v for c in conditions if c in self for v in self[c]}),
2032
+ ) # this computation could be wasted
2033
+
2034
+ width = max(len("%s" % s) for s in samples)
2035
+ freqs = dict()
2036
+ for c in conditions:
2037
+ if cumulative:
2038
+ freqs[c] = list(self[c]._cumulative_frequencies(samples))
2039
+ else:
2040
+ freqs[c] = [self[c][sample] for sample in samples]
2041
+ width = max(width, max(len("%d" % f) for f in freqs[c]))
2042
+
2043
+ condition_size = max(len("%s" % c) for c in conditions)
2044
+ print(" " * condition_size, end=" ")
2045
+ for s in samples:
2046
+ print("%*s" % (width, s), end=" ")
2047
+ print()
2048
+ for c in conditions:
2049
+ print("%*s" % (condition_size, c), end=" ")
2050
+ for f in freqs[c]:
2051
+ print("%*d" % (width, f), end=" ")
2052
+ print()
2053
+
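As a quick illustration of the constructor and ``tabulate()`` above, here is a minimal sketch; it assumes only that ``nltk`` is importable, and the pairs and the values shown in the comments are illustrative:

from nltk.probability import ConditionalFreqDist

pairs = [("news", "the"), ("news", "the"), ("news", "dog"),
         ("fiction", "the"), ("fiction", "cat")]
cfd = ConditionalFreqDist(pairs)

cfd.conditions()     # ['news', 'fiction'] -- one FreqDist per condition
cfd["news"]["the"]   # 2
cfd.N()              # 5 sample outcomes in total
cfd.tabulate()       # prints one row per condition, one column per sample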
2054
+ # Mathematical operators
2055
+
2056
+ def __add__(self, other):
2057
+ """
2058
+ Add counts from two ConditionalFreqDists.
2059
+ """
2060
+ if not isinstance(other, ConditionalFreqDist):
2061
+ return NotImplemented
2062
+ result = self.copy()
2063
+ for cond in other.conditions():
2064
+ result[cond] += other[cond]
2065
+ return result
2066
+
2067
+ def __sub__(self, other):
2068
+ """
2069
+ Subtract count, but keep only results with positive counts.
2070
+ """
2071
+ if not isinstance(other, ConditionalFreqDist):
2072
+ return NotImplemented
2073
+ result = self.copy()
2074
+ for cond in other.conditions():
2075
+ result[cond] -= other[cond]
2076
+ if not result[cond]:
2077
+ del result[cond]
2078
+ return result
2079
+
2080
+ def __or__(self, other):
2081
+ """
2082
+ Union is the maximum of value in either of the input counters.
2083
+ """
2084
+ if not isinstance(other, ConditionalFreqDist):
2085
+ return NotImplemented
2086
+ result = self.copy()
2087
+ for cond in other.conditions():
2088
+ result[cond] |= other[cond]
2089
+ return result
2090
+
2091
+ def __and__(self, other):
2092
+ """
2093
+ Intersection is the minimum of corresponding counts.
2094
+ """
2095
+ if not isinstance(other, ConditionalFreqDist):
2096
+ return NotImplemented
2097
+ result = ConditionalFreqDist()
2098
+ for cond in self.conditions():
2099
+ newfreqdist = self[cond] & other[cond]
2100
+ if newfreqdist:
2101
+ result[cond] = newfreqdist
2102
+ return result
2103
+
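A short sketch of the set-style arithmetic defined above; the values in the comments follow the counter semantics of ``FreqDist`` and are illustrative:

from nltk.probability import ConditionalFreqDist

cfd1 = ConditionalFreqDist([("c", "x"), ("c", "x"), ("d", "y")])
cfd2 = ConditionalFreqDist([("c", "x"), ("d", "y"), ("d", "y")])

(cfd1 + cfd2)["c"]["x"]      # 3: counts are summed per condition
(cfd1 - cfd2).conditions()   # ['c']: 'd' is dropped once its counts reach zero
(cfd1 | cfd2)["d"]["y"]      # 2: union keeps the larger count
(cfd1 & cfd2)["c"]["x"]      # 1: intersection keeps the smaller count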
2104
+ # @total_ordering doesn't work here, since the class inherits from a builtin class
2105
+ def __le__(self, other):
2106
+ if not isinstance(other, ConditionalFreqDist):
2107
+ raise_unorderable_types("<=", self, other)
2108
+ return set(self.conditions()).issubset(other.conditions()) and all(
2109
+ self[c] <= other[c] for c in self.conditions()
2110
+ )
2111
+
2112
+ def __lt__(self, other):
2113
+ if not isinstance(other, ConditionalFreqDist):
2114
+ raise_unorderable_types("<", self, other)
2115
+ return self <= other and self != other
2116
+
2117
+ def __ge__(self, other):
2118
+ if not isinstance(other, ConditionalFreqDist):
2119
+ raise_unorderable_types(">=", self, other)
2120
+ return other <= self
2121
+
2122
+ def __gt__(self, other):
2123
+ if not isinstance(other, ConditionalFreqDist):
2124
+ raise_unorderable_types(">", self, other)
2125
+ return other < self
2126
+
2127
+ def deepcopy(self):
2128
+ from copy import deepcopy
2129
+
2130
+ return deepcopy(self)
2131
+
2132
+ copy = deepcopy
2133
+
2134
+ def __repr__(self):
2135
+ """
2136
+ Return a string representation of this ``ConditionalFreqDist``.
2137
+
2138
+ :rtype: str
2139
+ """
2140
+ return "<ConditionalFreqDist with %d conditions>" % len(self)
2141
+
2142
+
2143
+ class ConditionalProbDistI(dict, metaclass=ABCMeta):
2144
+ """
2145
+ A collection of probability distributions for a single experiment
2146
+ run under different conditions. Conditional probability
2147
+ distributions are used to estimate the likelihood of each sample,
2148
+ given the condition under which the experiment was run. For
2149
+ example, a conditional probability distribution could be used to
2150
+ estimate the probability of each word type in a document, given
2151
+ the length of the word type. Formally, a conditional probability
2152
+ distribution can be defined as a function that maps from each
2153
+ condition to the ``ProbDist`` for the experiment under that
2154
+ condition.
2155
+ """
2156
+
2157
+ @abstractmethod
2158
+ def __init__(self):
2159
+ """
2160
+ Classes inheriting from ConditionalProbDistI should implement __init__.
2161
+ """
2162
+
2163
+ def conditions(self):
2164
+ """
2165
+ Return a list of the conditions that are represented by
2166
+ this ``ConditionalProbDist``. Use the indexing operator to
2167
+ access the probability distribution for a given condition.
2168
+
2169
+ :rtype: list
2170
+ """
2171
+ return list(self.keys())
2172
+
2173
+ def __repr__(self):
2174
+ """
2175
+ Return a string representation of this ``ConditionalProbDist``.
2176
+
2177
+ :rtype: str
2178
+ """
2179
+ return "<%s with %d conditions>" % (type(self).__name__, len(self))
2180
+
2181
+
2182
+ class ConditionalProbDist(ConditionalProbDistI):
2183
+ """
2184
+ A conditional probability distribution modeling the experiments
2185
+ that were used to generate a conditional frequency distribution.
2186
+ A ConditionalProbDist is constructed from a
2187
+ ``ConditionalFreqDist`` and a ``ProbDist`` factory:
2188
+
2189
+ - The ``ConditionalFreqDist`` specifies the frequency
2190
+ distribution for each condition.
2191
+ - The ``ProbDist`` factory is a function that takes a
2192
+ condition's frequency distribution, and returns its
2193
+ probability distribution. A ``ProbDist`` class's name (such as
2194
+ ``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify
2195
+ that class's constructor.
2196
+
2197
+ The first argument to the ``ProbDist`` factory is the frequency
2198
+ distribution that it should model; and the remaining arguments are
2199
+ specified by the ``factory_args`` parameter to the
2200
+ ``ConditionalProbDist`` constructor. For example, the following
2201
+ code constructs a ``ConditionalProbDist``, where the probability
2202
+ distribution for each condition is an ``ELEProbDist`` with 10 bins:
2203
+
2204
+ >>> from nltk.corpus import brown
2205
+ >>> from nltk.probability import ConditionalFreqDist
2206
+ >>> from nltk.probability import ConditionalProbDist, ELEProbDist
2207
+ >>> cfdist = ConditionalFreqDist(brown.tagged_words()[:5000])
2208
+ >>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
2209
+ >>> cpdist['passed'].max()
2210
+ 'VBD'
2211
+ >>> cpdist['passed'].prob('VBD') #doctest: +ELLIPSIS
2212
+ 0.423...
2213
+
2214
+ """
2215
+
2216
+ def __init__(self, cfdist, probdist_factory, *factory_args, **factory_kw_args):
2217
+ """
2218
+ Construct a new conditional probability distribution, based on
2219
+ the given conditional frequency distribution and ``ProbDist``
2220
+ factory.
2221
+
2222
+ :type cfdist: ConditionalFreqDist
2223
+ :param cfdist: The ``ConditionalFreqDist`` specifying the
2224
+ frequency distribution for each condition.
2225
+ :type probdist_factory: class or function
2226
+ :param probdist_factory: The function or class that maps
2227
+ a condition's frequency distribution to its probability
2228
+ distribution. The function is called with the frequency
2229
+ distribution as its first argument,
2230
+ ``factory_args`` as its remaining arguments, and
2231
+ ``factory_kw_args`` as keyword arguments.
2232
+ :type factory_args: (any)
2233
+ :param factory_args: Extra arguments for ``probdist_factory``.
2234
+ These arguments are usually used to specify extra
2235
+ properties for the probability distributions of individual
2236
+ conditions, such as the number of bins they contain.
2237
+ :type factory_kw_args: (any)
2238
+ :param factory_kw_args: Extra keyword arguments for ``probdist_factory``.
2239
+ """
2240
+ self._probdist_factory = probdist_factory
2241
+ self._factory_args = factory_args
2242
+ self._factory_kw_args = factory_kw_args
2243
+
2244
+ for condition in cfdist:
2245
+ self[condition] = probdist_factory(
2246
+ cfdist[condition], *factory_args, **factory_kw_args
2247
+ )
2248
+
2249
+ def __missing__(self, key):
2250
+ self[key] = self._probdist_factory(
2251
+ FreqDist(), *self._factory_args, **self._factory_kw_args
2252
+ )
2253
+ return self[key]
2254
+
2255
+
2256
+ class DictionaryConditionalProbDist(ConditionalProbDistI):
2257
+ """
2258
+ An alternative ConditionalProbDist that simply wraps a dictionary of
2259
+ ProbDists rather than creating these from FreqDists.
2260
+ """
2261
+
2262
+ def __init__(self, probdist_dict):
2263
+ """
2264
+ :param probdist_dict: a dictionary containing the probdists indexed
2265
+ by the conditions
2266
+ :type probdist_dict: dict any -> probdist
2267
+ """
2268
+ self.update(probdist_dict)
2269
+
2270
+ def __missing__(self, key):
2271
+ self[key] = DictionaryProbDist()
2272
+ return self[key]
2273
+
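A minimal usage sketch for the wrapper above (the conditions and probabilities are illustrative):

from nltk.probability import DictionaryConditionalProbDist, DictionaryProbDist

cpd = DictionaryConditionalProbDist({
    "biased": DictionaryProbDist({"H": 0.7, "T": 0.3}),
    "fair": DictionaryProbDist({"H": 0.5, "T": 0.5}),
})

cpd["fair"].prob("H")     # 0.5
cpd["unseen"].prob("H")   # 0: __missing__ supplies an empty DictionaryProbDist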
2274
+
2275
+ ##//////////////////////////////////////////////////////
2276
+ ## Adding in log-space.
2277
+ ##//////////////////////////////////////////////////////
2278
+
2279
+ # If the difference is bigger than this, then just take the bigger one:
2280
+ _ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)
2281
+
2282
+
2283
+ def add_logs(logx, logy):
2284
+ """
2285
+ Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
2286
+ *log(x+y)*. Conceptually, this is the same as returning
2287
+ ``log(2**(logx)+2**(logy))``, but the actual implementation
2288
+ avoids overflow errors that could result from direct computation.
2289
+ """
2290
+ if logx < logy + _ADD_LOGS_MAX_DIFF:
2291
+ return logy
2292
+ if logy < logx + _ADD_LOGS_MAX_DIFF:
2293
+ return logx
2294
+ base = min(logx, logy)
2295
+ return base + math.log(2 ** (logx - base) + 2 ** (logy - base), 2)
2296
+
2297
+
2298
+ def sum_logs(logs):
2299
+ return reduce(add_logs, logs[1:], logs[0]) if len(logs) != 0 else _NINF
2300
+
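A quick worked check of the log-space helpers above (all logarithms are base 2, as elsewhere in this module):

import math
from nltk.probability import add_logs, sum_logs

log_quarter = math.log(0.25, 2)        # -2.0
add_logs(log_quarter, log_quarter)     # -1.0, i.e. log2(0.25 + 0.25)
sum_logs([log_quarter] * 4)            # 0.0 (up to rounding), i.e. log2(1.0)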
2301
+
2302
+ ##//////////////////////////////////////////////////////
2303
+ ## Probabilistic Mix-in
2304
+ ##//////////////////////////////////////////////////////
2305
+
2306
+
2307
+ class ProbabilisticMixIn:
2308
+ """
2309
+ A mix-in class to associate probabilities with other classes
2310
+ (trees, rules, etc.). To use the ``ProbabilisticMixIn`` class,
2311
+ define a new class that derives from an existing class and from
2312
+ ProbabilisticMixIn. You will need to define a new constructor for
2313
+ the new class, which explicitly calls the constructors of both its
2314
+ parent classes. For example:
2315
+
2316
+ >>> from nltk.probability import ProbabilisticMixIn
2317
+ >>> class A:
2318
+ ... def __init__(self, x, y): self.data = (x,y)
2319
+ ...
2320
+ >>> class ProbabilisticA(A, ProbabilisticMixIn):
2321
+ ... def __init__(self, x, y, **prob_kwarg):
2322
+ ... A.__init__(self, x, y)
2323
+ ... ProbabilisticMixIn.__init__(self, **prob_kwarg)
2324
+
2325
+ See the documentation for the ProbabilisticMixIn
2326
+ ``constructor<__init__>`` for information about the arguments it
2327
+ expects.
2328
+
2329
+ You should generally also redefine the string representation
2330
+ methods, the comparison methods, and the hashing method.
2331
+ """
2332
+
2333
+ def __init__(self, **kwargs):
2334
+ """
2335
+ Initialize this object's probability. This initializer should
2336
+ be called by subclass constructors. ``prob`` should generally be
2337
+ the first argument for those constructors.
2338
+
2339
+ :param prob: The probability associated with the object.
2340
+ :type prob: float
2341
+ :param logprob: The log of the probability associated with
2342
+ the object.
2343
+ :type logprob: float
2344
+ """
2345
+ if "prob" in kwargs:
2346
+ if "logprob" in kwargs:
2347
+ raise TypeError("Must specify either prob or logprob " "(not both)")
2348
+ else:
2349
+ ProbabilisticMixIn.set_prob(self, kwargs["prob"])
2350
+ elif "logprob" in kwargs:
2351
+ ProbabilisticMixIn.set_logprob(self, kwargs["logprob"])
2352
+ else:
2353
+ self.__prob = self.__logprob = None
2354
+
2355
+ def set_prob(self, prob):
2356
+ """
2357
+ Set the probability associated with this object to ``prob``.
2358
+
2359
+ :param prob: The new probability
2360
+ :type prob: float
2361
+ """
2362
+ self.__prob = prob
2363
+ self.__logprob = None
2364
+
2365
+ def set_logprob(self, logprob):
2366
+ """
2367
+ Set the log probability associated with this object to
2368
+ ``logprob``. I.e., set the probability associated with this
2369
+ object to ``2**(logprob)``.
2370
+
2371
+ :param logprob: The new log probability
2372
+ :type logprob: float
2373
+ """
2374
+ self.__logprob = logprob
2375
+ self.__prob = None
2376
+
2377
+ def prob(self):
2378
+ """
2379
+ Return the probability associated with this object.
2380
+
2381
+ :rtype: float
2382
+ """
2383
+ if self.__prob is None:
2384
+ if self.__logprob is None:
2385
+ return None
2386
+ self.__prob = 2 ** (self.__logprob)
2387
+ return self.__prob
2388
+
2389
+ def logprob(self):
2390
+ """
2391
+ Return ``log(p)``, where ``p`` is the probability associated
2392
+ with this object.
2393
+
2394
+ :rtype: float
2395
+ """
2396
+ if self.__logprob is None:
2397
+ if self.__prob is None:
2398
+ return None
2399
+ self.__logprob = math.log(self.__prob, 2)
2400
+ return self.__logprob
2401
+
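Continuing the ``ProbabilisticA`` example from the class docstring, a small sketch of how ``prob()`` and ``logprob()`` relate (log probabilities are base 2):

from nltk.probability import ProbabilisticMixIn

class A:
    def __init__(self, x, y):
        self.data = (x, y)

class ProbabilisticA(A, ProbabilisticMixIn):
    def __init__(self, x, y, **prob_kwarg):
        A.__init__(self, x, y)
        ProbabilisticMixIn.__init__(self, **prob_kwarg)

pa = ProbabilisticA(1, 2, prob=0.25)
pa.prob()        # 0.25
pa.logprob()     # -2.0, since log2(0.25) == -2
pa.set_logprob(-1.0)
pa.prob()        # 0.5, recomputed as 2 ** -1.0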
2402
+
2403
+ class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
2404
+ def set_prob(self, prob):
2405
+ raise ValueError("%s is immutable" % self.__class__.__name__)
2406
+
2407
+ def set_logprob(self, prob):
2408
+ raise ValueError("%s is immutable" % self.__class__.__name__)
2409
+
2410
+
2411
+ ## Helper function for processing keyword arguments
2412
+
2413
+
2414
+ def _get_kwarg(kwargs, key, default):
2415
+ if key in kwargs:
2416
+ arg = kwargs[key]
2417
+ del kwargs[key]
2418
+ else:
2419
+ arg = default
2420
+ return arg
2421
+
2422
+
2423
+ ##//////////////////////////////////////////////////////
2424
+ ## Demonstration
2425
+ ##//////////////////////////////////////////////////////
2426
+
2427
+
2428
+ def _create_rand_fdist(numsamples, numoutcomes):
2429
+ """
2430
+ Create a new frequency distribution, with random samples. The
2431
+ samples are numbers from 1 to ``numsamples``, and are generated by
2432
+ summing two numbers, each of which has a uniform distribution.
2433
+ """
2434
+
2435
+ fdist = FreqDist()
2436
+ for x in range(numoutcomes):
2437
+ y = random.randint(1, (1 + numsamples) // 2) + random.randint(
2438
+ 0, numsamples // 2
2439
+ )
2440
+ fdist[y] += 1
2441
+ return fdist
2442
+
2443
+
2444
+ def _create_sum_pdist(numsamples):
2445
+ """
2446
+ Return the true probability distribution for the experiment
2447
+ ``_create_rand_fdist(numsamples, x)``.
2448
+ """
2449
+ fdist = FreqDist()
2450
+ for x in range(1, (1 + numsamples) // 2 + 1):
2451
+ for y in range(0, numsamples // 2 + 1):
2452
+ fdist[x + y] += 1
2453
+ return MLEProbDist(fdist)
2454
+
2455
+
2456
+ def demo(numsamples=6, numoutcomes=500):
2457
+ """
2458
+ A demonstration of frequency distributions and probability
2459
+ distributions. This demonstration creates three frequency
2460
+ distributions by sampling a random process whose samples range from
2461
+ 1 to ``numsamples``; each frequency distribution records
2462
+ ``numoutcomes`` outcomes. These three frequency distributions are
2463
+ then used to build several probability distributions. Finally, the
2464
+ probability estimates of these distributions are compared to the
2465
+ actual probability of each sample.
2466
+
2467
+ :type numsamples: int
2468
+ :param numsamples: The number of samples to use in each demo
2469
+ frequency distribution.
2470
+ :type numoutcomes: int
2471
+ :param numoutcomes: The total number of outcomes for each
2472
+ demo frequency distribution. These outcomes are divided into
2473
+ ``numsamples`` bins.
2474
+ :rtype: None
2475
+ """
2476
+
2477
+ # Randomly sample a stochastic process three times.
2478
+ fdist1 = _create_rand_fdist(numsamples, numoutcomes)
2479
+ fdist2 = _create_rand_fdist(numsamples, numoutcomes)
2480
+ fdist3 = _create_rand_fdist(numsamples, numoutcomes)
2481
+
2482
+ # Use our samples to create probability distributions.
2483
+ pdists = [
2484
+ MLEProbDist(fdist1),
2485
+ LidstoneProbDist(fdist1, 0.5, numsamples),
2486
+ HeldoutProbDist(fdist1, fdist2, numsamples),
2487
+ HeldoutProbDist(fdist2, fdist1, numsamples),
2488
+ CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
2489
+ SimpleGoodTuringProbDist(fdist1),
2490
+ SimpleGoodTuringProbDist(fdist1, 7),
2491
+ _create_sum_pdist(numsamples),
2492
+ ]
2493
+
2494
+ # Find the probability of each sample.
2495
+ vals = []
2496
+ for n in range(1, numsamples + 1):
2497
+ vals.append(tuple([n, fdist1.freq(n)] + [pdist.prob(n) for pdist in pdists]))
2498
+
2499
+ # Print the results in a formatted table.
2500
+ print(
2501
+ "%d samples (1-%d); %d outcomes were sampled for each FreqDist"
2502
+ % (numsamples, numsamples, numoutcomes)
2503
+ )
2504
+ print("=" * 9 * (len(pdists) + 2))
2505
+ FORMATSTR = " FreqDist " + "%8s " * (len(pdists) - 1) + "| Actual"
2506
+ print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1]))
2507
+ print("-" * 9 * (len(pdists) + 2))
2508
+ FORMATSTR = "%3d %8.6f " + "%8.6f " * (len(pdists) - 1) + "| %8.6f"
2509
+ for val in vals:
2510
+ print(FORMATSTR % val)
2511
+
2512
+ # Print the totals for each column (should all be 1.0)
2513
+ zvals = list(zip(*vals))
2514
+ sums = [sum(val) for val in zvals[1:]]
2515
+ print("-" * 9 * (len(pdists) + 2))
2516
+ FORMATSTR = "Total " + "%8.6f " * (len(pdists)) + "| %8.6f"
2517
+ print(FORMATSTR % tuple(sums))
2518
+ print("=" * 9 * (len(pdists) + 2))
2519
+
2520
+ # Display the distributions themselves, if they're short enough.
2521
+ if len("%s" % fdist1) < 70:
2522
+ print(" fdist1: %s" % fdist1)
2523
+ print(" fdist2: %s" % fdist2)
2524
+ print(" fdist3: %s" % fdist3)
2525
+ print()
2526
+
2527
+ print("Generating:")
2528
+ for pdist in pdists:
2529
+ fdist = FreqDist(pdist.generate() for i in range(5000))
2530
+ print("{:>20} {}".format(pdist.__class__.__name__[:20], ("%s" % fdist)[:55]))
2531
+ print()
2532
+
2533
+
2534
+ def gt_demo():
2535
+ from nltk import corpus
2536
+
2537
+ emma_words = corpus.gutenberg.words("austen-emma.txt")
2538
+ fd = FreqDist(emma_words)
2539
+ sgt = SimpleGoodTuringProbDist(fd)
2540
+ print("{:>18} {:>8} {:>14}".format("word", "frequency", "SimpleGoodTuring"))
2541
+ fd_keys_sorted = (
2542
+ key for key, value in sorted(fd.items(), key=lambda item: item[1], reverse=True)
2543
+ )
2544
+ for key in fd_keys_sorted:
2545
+ print("%18s %8d %14e" % (key, fd[key], sgt.prob(key)))
2546
+
2547
+
2548
+ if __name__ == "__main__":
2549
+ demo(6, 10)
2550
+ demo(5, 5000)
2551
+ gt_demo()
2552
+
2553
+ __all__ = [
2554
+ "ConditionalFreqDist",
2555
+ "ConditionalProbDist",
2556
+ "ConditionalProbDistI",
2557
+ "CrossValidationProbDist",
2558
+ "DictionaryConditionalProbDist",
2559
+ "DictionaryProbDist",
2560
+ "ELEProbDist",
2561
+ "FreqDist",
2562
+ "SimpleGoodTuringProbDist",
2563
+ "HeldoutProbDist",
2564
+ "ImmutableProbabilisticMixIn",
2565
+ "LaplaceProbDist",
2566
+ "LidstoneProbDist",
2567
+ "MLEProbDist",
2568
+ "MutableProbDist",
2569
+ "KneserNeyProbDist",
2570
+ "ProbDistI",
2571
+ "ProbabilisticMixIn",
2572
+ "UniformProbDist",
2573
+ "WittenBellProbDist",
2574
+ "add_logs",
2575
+ "log_likelihood",
2576
+ "sum_logs",
2577
+ "entropy",
2578
+ ]
llmeval-env/lib/python3.10/site-packages/nltk/stem/api.py ADDED
@@ -0,0 +1,27 @@
1
+ # Natural Language Toolkit: Stemmer Interface
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # Steven Bird <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ from abc import ABCMeta, abstractmethod
11
+
12
+
13
+ class StemmerI(metaclass=ABCMeta):
14
+ """
15
+ A processing interface for removing morphological affixes from
16
+ words. This process is known as stemming.
17
+
18
+ """
19
+
20
+ @abstractmethod
21
+ def stem(self, token):
22
+ """
23
+ Strip affixes from the token and return the stem.
24
+
25
+ :param token: The token that should be stemmed.
26
+ :type token: str
27
+ """
llmeval-env/lib/python3.10/site-packages/nltk/stem/util.py ADDED
@@ -0,0 +1,25 @@
1
+ # Natural Language Toolkit: Stemmer Utilities
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Helder <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+
9
+ def suffix_replace(original, old, new):
10
+ """
11
+ Replaces the old suffix of the original string by a new suffix
12
+ """
13
+ return original[: -len(old)] + new
14
+
15
+
16
+ def prefix_replace(original, old, new):
17
+ """
18
+ Replaces the old prefix of the original string by a new prefix
19
+
20
+ :param original: string
21
+ :param old: string
22
+ :param new: string
23
+ :return: string
24
+ """
25
+ return new + original[len(old) :]
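Two quick examples of the helpers above:

from nltk.stem.util import suffix_replace, prefix_replace

suffix_replace("happiness", "ness", "ly")   # 'happily'
prefix_replace("unhappy", "un", "very")     # 'veryhappy'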
llmeval-env/lib/python3.10/site-packages/nltk/text.py ADDED
@@ -0,0 +1,779 @@
1
+ # Natural Language Toolkit: Texts
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ This module brings together a variety of NLTK functionality for
11
+ text analysis, and provides simple, interactive interfaces.
12
+ Functionality includes: concordancing, collocation discovery,
13
+ regular expression search over tokenized strings, and
14
+ distributional similarity.
15
+ """
16
+
17
+ import re
18
+ import sys
19
+ from collections import Counter, defaultdict, namedtuple
20
+ from functools import reduce
21
+ from math import log
22
+
23
+ from nltk.collocations import BigramCollocationFinder
24
+ from nltk.lm import MLE
25
+ from nltk.lm.preprocessing import padded_everygram_pipeline
26
+ from nltk.metrics import BigramAssocMeasures, f_measure
27
+ from nltk.probability import ConditionalFreqDist as CFD
28
+ from nltk.probability import FreqDist
29
+ from nltk.tokenize import sent_tokenize
30
+ from nltk.util import LazyConcatenation, tokenwrap
31
+
32
+ ConcordanceLine = namedtuple(
33
+ "ConcordanceLine",
34
+ ["left", "query", "right", "offset", "left_print", "right_print", "line"],
35
+ )
36
+
37
+
38
+ class ContextIndex:
39
+ """
40
+ A bidirectional index between words and their 'contexts' in a text.
41
+ The context of a word is usually defined to be the words that occur
42
+ in a fixed window around the word; but other definitions may also
43
+ be used by providing a custom context function.
44
+ """
45
+
46
+ @staticmethod
47
+ def _default_context(tokens, i):
48
+ """One left token and one right token, normalized to lowercase"""
49
+ left = tokens[i - 1].lower() if i != 0 else "*START*"
50
+ right = tokens[i + 1].lower() if i != len(tokens) - 1 else "*END*"
51
+ return (left, right)
52
+
53
+ def __init__(self, tokens, context_func=None, filter=None, key=lambda x: x):
54
+ self._key = key
55
+ self._tokens = tokens
56
+ if context_func:
57
+ self._context_func = context_func
58
+ else:
59
+ self._context_func = self._default_context
60
+ if filter:
61
+ tokens = [t for t in tokens if filter(t)]
62
+ self._word_to_contexts = CFD(
63
+ (self._key(w), self._context_func(tokens, i)) for i, w in enumerate(tokens)
64
+ )
65
+ self._context_to_words = CFD(
66
+ (self._context_func(tokens, i), self._key(w)) for i, w in enumerate(tokens)
67
+ )
68
+
69
+ def tokens(self):
70
+ """
71
+ :rtype: list(str)
72
+ :return: The document that this context index was
73
+ created from.
74
+ """
75
+ return self._tokens
76
+
77
+ def word_similarity_dict(self, word):
78
+ """
79
+ Return a dictionary mapping from words to 'similarity scores,'
80
+ indicating how often these two words occur in the same
81
+ context.
82
+ """
83
+ word = self._key(word)
84
+ word_contexts = set(self._word_to_contexts[word])
85
+
86
+ scores = {}
87
+ for w, w_contexts in self._word_to_contexts.items():
88
+ scores[w] = f_measure(word_contexts, set(w_contexts))
89
+
90
+ return scores
91
+
92
+ def similar_words(self, word, n=20):
93
+ scores = defaultdict(int)
94
+ for c in self._word_to_contexts[self._key(word)]:
95
+ for w in self._context_to_words[c]:
96
+ if w != word:
97
+ scores[w] += (
98
+ self._context_to_words[c][word] * self._context_to_words[c][w]
99
+ )
100
+ return sorted(scores, key=scores.get, reverse=True)[:n]
101
+
102
+ def common_contexts(self, words, fail_on_unknown=False):
103
+ """
104
+ Find contexts where the specified words can all appear; and
105
+ return a frequency distribution mapping each context to the
106
+ number of times that context was used.
107
+
108
+ :param words: The words used to seed the similarity search
109
+ :type words: str
110
+ :param fail_on_unknown: If true, then raise a value error if
111
+ any of the given words do not occur at all in the index.
112
+ """
113
+ words = [self._key(w) for w in words]
114
+ contexts = [set(self._word_to_contexts[w]) for w in words]
115
+ empty = [words[i] for i in range(len(words)) if not contexts[i]]
116
+ common = reduce(set.intersection, contexts)
117
+ if empty and fail_on_unknown:
118
+ raise ValueError("The following word(s) were not found:", " ".join(words))
119
+ elif not common:
120
+ # nothing in common -- just return an empty freqdist.
121
+ return FreqDist()
122
+ else:
123
+ fd = FreqDist(
124
+ c for w in words for c in self._word_to_contexts[w] if c in common
125
+ )
126
+ return fd
127
+
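A small sketch of the index above on a toy token list (the tokens and the expected results in the comments are illustrative):

from nltk.text import ContextIndex

tokens = "the cat sat on the mat and the dog sat on the rug".split()
idx = ContextIndex(tokens)

idx.similar_words("cat")             # ['dog'] -- both share the ('the', 'sat') context
idx.common_contexts(["cat", "dog"])  # FreqDist({('the', 'sat'): 2})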
128
+
129
+ class ConcordanceIndex:
130
+ """
131
+ An index that can be used to look up the offset locations at which
132
+ a given word occurs in a document.
133
+ """
134
+
135
+ def __init__(self, tokens, key=lambda x: x):
136
+ """
137
+ Construct a new concordance index.
138
+
139
+ :param tokens: The document (list of tokens) that this
140
+ concordance index was created from. This list can be used
141
+ to access the context of a given word occurrence.
142
+ :param key: A function that maps each token to a normalized
143
+ version that will be used as a key in the index. E.g., if
144
+ you use ``key=lambda s:s.lower()``, then the index will be
145
+ case-insensitive.
146
+ """
147
+ self._tokens = tokens
148
+ """The document (list of tokens) that this concordance index
149
+ was created from."""
150
+
151
+ self._key = key
152
+ """Function mapping each token to an index key (or None)."""
153
+
154
+ self._offsets = defaultdict(list)
155
+ """Dictionary mapping words (or keys) to lists of offset indices."""
156
+ # Initialize the index (self._offsets)
157
+ for index, word in enumerate(tokens):
158
+ word = self._key(word)
159
+ self._offsets[word].append(index)
160
+
161
+ def tokens(self):
162
+ """
163
+ :rtype: list(str)
164
+ :return: The document that this concordance index was
165
+ created from.
166
+ """
167
+ return self._tokens
168
+
169
+ def offsets(self, word):
170
+ """
171
+ :rtype: list(int)
172
+ :return: A list of the offset positions at which the given
173
+ word occurs. If a key function was specified for the
174
+ index, then given word's key will be looked up.
175
+ """
176
+ word = self._key(word)
177
+ return self._offsets[word]
178
+
179
+ def __repr__(self):
180
+ return "<ConcordanceIndex for %d tokens (%d types)>" % (
181
+ len(self._tokens),
182
+ len(self._offsets),
183
+ )
184
+
185
+ def find_concordance(self, word, width=80):
186
+ """
187
+ Find all concordance lines given the query word.
188
+
189
+ Provided with a list of words, these will be found as a phrase.
190
+ """
191
+ if isinstance(word, list):
192
+ phrase = word
193
+ else:
194
+ phrase = [word]
195
+
196
+ half_width = (width - len(" ".join(phrase)) - 2) // 2
197
+ context = width // 4 # approx number of words of context
198
+
199
+ # Find the instances of the word to create the ConcordanceLine
200
+ concordance_list = []
201
+ offsets = self.offsets(phrase[0])
202
+ for i, word in enumerate(phrase[1:]):
203
+ word_offsets = {offset - i - 1 for offset in self.offsets(word)}
204
+ offsets = sorted(word_offsets.intersection(offsets))
205
+ if offsets:
206
+ for i in offsets:
207
+ query_word = " ".join(self._tokens[i : i + len(phrase)])
208
+ # Find the context of query word.
209
+ left_context = self._tokens[max(0, i - context) : i]
210
+ right_context = self._tokens[i + len(phrase) : i + context]
211
+ # Create the pretty lines with the query_word in the middle.
212
+ left_print = " ".join(left_context)[-half_width:]
213
+ right_print = " ".join(right_context)[:half_width]
214
+ # The WYSIWYG line of the concordance.
215
+ line_print = " ".join([left_print, query_word, right_print])
216
+ # Create the ConcordanceLine
217
+ concordance_line = ConcordanceLine(
218
+ left_context,
219
+ query_word,
220
+ right_context,
221
+ i,
222
+ left_print,
223
+ right_print,
224
+ line_print,
225
+ )
226
+ concordance_list.append(concordance_line)
227
+ return concordance_list
228
+
229
+ def print_concordance(self, word, width=80, lines=25):
230
+ """
231
+ Print concordance lines given the query word.
232
+ :param word: The target word or phrase (a list of strings)
233
+ :type word: str or list
234
+ :param lines: The number of lines to display (default=25)
235
+ :type lines: int
236
+ :param width: The width of each line, in characters (default=80)
237
+ :type width: int
238
240
+ """
241
+ concordance_list = self.find_concordance(word, width=width)
242
+
243
+ if not concordance_list:
244
+ print("no matches")
245
+ else:
246
+ lines = min(lines, len(concordance_list))
247
+ print(f"Displaying {lines} of {len(concordance_list)} matches:")
248
+ for i, concordance_line in enumerate(concordance_list[:lines]):
249
+ print(concordance_line.line)
250
+
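A short usage sketch for the index above (toy tokens; the printed output is abbreviated):

from nltk.text import ConcordanceIndex

tokens = "the cat sat on the mat".split()
ci = ConcordanceIndex(tokens, key=lambda s: s.lower())

ci.offsets("the")             # [0, 4]
lines = ci.find_concordance("cat")
lines[0].query                # 'cat', alongside left/right context and print-ready fields
ci.print_concordance("the", width=40, lines=5)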
251
+
252
+ class TokenSearcher:
253
+ """
254
+ A class that makes it easier to use regular expressions to search
255
+ over tokenized strings. The tokenized string is converted to a
256
+ string where tokens are marked with angle brackets -- e.g.,
257
+ ``'<the><window><is><still><open>'``. The regular expression
258
+ passed to the ``findall()`` method is modified to treat angle
259
+ brackets as non-capturing parentheses, in addition to matching the
260
+ token boundaries; and to have ``'.'`` not match the angle brackets.
261
+ """
262
+
263
+ def __init__(self, tokens):
264
+ self._raw = "".join("<" + w + ">" for w in tokens)
265
+
266
+ def findall(self, regexp):
267
+ """
268
+ Find instances of the regular expression in the text.
269
+ The text is a list of tokens, and a regexp pattern to match
270
+ a single token must be surrounded by angle brackets. E.g.
271
+
272
+ >>> from nltk.text import TokenSearcher
273
+ >>> from nltk.book import text1, text5, text9
274
+ >>> text5.findall("<.*><.*><bro>")
275
+ you rule bro; telling you bro; u twizted bro
276
+ >>> text1.findall("<a>(<.*>)<man>")
277
+ monied; nervous; dangerous; white; white; white; pious; queer; good;
278
+ mature; white; Cape; great; wise; wise; butterless; white; fiendish;
279
+ pale; furious; better; certain; complete; dismasted; younger; brave;
280
+ brave; brave; brave
281
+ >>> text9.findall("<th.*>{3,}")
282
+ thread through those; the thought that; that the thing; the thing
283
+ that; that that thing; through these than through; them that the;
284
+ through the thick; them that they; thought that the
285
+
286
+ :param regexp: A regular expression
287
+ :type regexp: str
288
+ """
289
+ # preprocess the regular expression
290
+ regexp = re.sub(r"\s", "", regexp)
291
+ regexp = re.sub(r"<", "(?:<(?:", regexp)
292
+ regexp = re.sub(r">", ")>)", regexp)
293
+ regexp = re.sub(r"(?<!\\)\.", "[^>]", regexp)
294
+
295
+ # perform the search
296
+ hits = re.findall(regexp, self._raw)
297
+
298
+ # Sanity check
299
+ for h in hits:
300
+ if not h.startswith("<") and h.endswith(">"):
301
+ raise ValueError("Bad regexp for TokenSearcher.findall")
302
+
303
+ # postprocess the output
304
+ hits = [h[1:-1].split("><") for h in hits]
305
+ return hits
306
+
307
+
308
+ class Text:
309
+ """
310
+ A wrapper around a sequence of simple (string) tokens, which is
311
+ intended to support initial exploration of texts (via the
312
+ interactive console). Its methods perform a variety of analyses
313
+ on the text's contexts (e.g., counting, concordancing, collocation
314
+ discovery), and display the results. If you wish to write a
315
+ program which makes use of these analyses, then you should bypass
316
+ the ``Text`` class, and use the appropriate analysis function or
317
+ class directly instead.
318
+
319
+ A ``Text`` is typically initialized from a given document or
320
+ corpus. E.g.:
321
+
322
+ >>> import nltk.corpus
323
+ >>> from nltk.text import Text
324
+ >>> moby = Text(nltk.corpus.gutenberg.words('melville-moby_dick.txt'))
325
+
326
+ """
327
+
328
+ # This defeats lazy loading, but makes things faster. This
329
+ # *shouldn't* be necessary because the corpus view *should* be
330
+ # doing intelligent caching, but without this it's running slow.
331
+ # Look into whether the caching is working correctly.
332
+ _COPY_TOKENS = True
333
+
334
+ def __init__(self, tokens, name=None):
335
+ """
336
+ Create a Text object.
337
+
338
+ :param tokens: The source text.
339
+ :type tokens: sequence of str
340
+ """
341
+ if self._COPY_TOKENS:
342
+ tokens = list(tokens)
343
+ self.tokens = tokens
344
+
345
+ if name:
346
+ self.name = name
347
+ elif "]" in tokens[:20]:
348
+ end = tokens[:20].index("]")
349
+ self.name = " ".join(str(tok) for tok in tokens[1:end])
350
+ else:
351
+ self.name = " ".join(str(tok) for tok in tokens[:8]) + "..."
352
+
353
+ # ////////////////////////////////////////////////////////////
354
+ # Support item & slice access
355
+ # ////////////////////////////////////////////////////////////
356
+
357
+ def __getitem__(self, i):
358
+ return self.tokens[i]
359
+
360
+ def __len__(self):
361
+ return len(self.tokens)
362
+
363
+ # ////////////////////////////////////////////////////////////
364
+ # Interactive console methods
365
+ # ////////////////////////////////////////////////////////////
366
+
367
+ def concordance(self, word, width=79, lines=25):
368
+ """
369
+ Prints a concordance for ``word`` with the specified context window.
370
+ Word matching is not case-sensitive.
371
+
372
+ :param word: The target word or phrase (a list of strings)
373
+ :type word: str or list
374
+ :param width: The width of each line, in characters (default=79)
375
+ :type width: int
376
+ :param lines: The number of lines to display (default=25)
377
+ :type lines: int
378
+
379
+ :seealso: ``ConcordanceIndex``
380
+ """
381
+ if "_concordance_index" not in self.__dict__:
382
+ self._concordance_index = ConcordanceIndex(
383
+ self.tokens, key=lambda s: s.lower()
384
+ )
385
+
386
+ return self._concordance_index.print_concordance(word, width, lines)
387
+
388
+ def concordance_list(self, word, width=79, lines=25):
389
+ """
390
+ Generate a concordance for ``word`` with the specified context window.
391
+ Word matching is not case-sensitive.
392
+
393
+ :param word: The target word or phrase (a list of strings)
394
+ :type word: str or list
395
+ :param width: The width of each line, in characters (default=79)
396
+ :type width: int
397
+ :param lines: The number of lines to display (default=25)
398
+ :type lines: int
399
+
400
+ :seealso: ``ConcordanceIndex``
401
+ """
402
+ if "_concordance_index" not in self.__dict__:
403
+ self._concordance_index = ConcordanceIndex(
404
+ self.tokens, key=lambda s: s.lower()
405
+ )
406
+ return self._concordance_index.find_concordance(word, width)[:lines]
407
+
408
+ def collocation_list(self, num=20, window_size=2):
409
+ """
410
+ Return collocations derived from the text, ignoring stopwords.
411
+
412
+ >>> from nltk.book import text4
413
+ >>> text4.collocation_list()[:2]
414
+ [('United', 'States'), ('fellow', 'citizens')]
415
+
416
+ :param num: The maximum number of collocations to return.
417
+ :type num: int
418
+ :param window_size: The number of tokens spanned by a collocation (default=2)
419
+ :type window_size: int
420
+ :rtype: list(tuple(str, str))
421
+ """
422
+ if not (
423
+ "_collocations" in self.__dict__
424
+ and self._num == num
425
+ and self._window_size == window_size
426
+ ):
427
+ self._num = num
428
+ self._window_size = window_size
429
+
430
+ # print("Building collocations list")
431
+ from nltk.corpus import stopwords
432
+
433
+ ignored_words = stopwords.words("english")
434
+ finder = BigramCollocationFinder.from_words(self.tokens, window_size)
435
+ finder.apply_freq_filter(2)
436
+ finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
437
+ bigram_measures = BigramAssocMeasures()
438
+ self._collocations = list(
439
+ finder.nbest(bigram_measures.likelihood_ratio, num)
440
+ )
441
+ return self._collocations
442
+
443
+ def collocations(self, num=20, window_size=2):
444
+ """
445
+ Print collocations derived from the text, ignoring stopwords.
446
+
447
+ >>> from nltk.book import text4
448
+ >>> text4.collocations() # doctest: +NORMALIZE_WHITESPACE
449
+ United States; fellow citizens; years ago; four years; Federal
450
+ Government; General Government; American people; Vice President; God
451
+ bless; Chief Justice; one another; fellow Americans; Old World;
452
+ Almighty God; Fellow citizens; Chief Magistrate; every citizen; Indian
453
+ tribes; public debt; foreign nations
454
+
455
+
456
+ :param num: The maximum number of collocations to print.
457
+ :type num: int
458
+ :param window_size: The number of tokens spanned by a collocation (default=2)
459
+ :type window_size: int
460
+ """
461
+
462
+ collocation_strings = [
463
+ w1 + " " + w2 for w1, w2 in self.collocation_list(num, window_size)
464
+ ]
465
+ print(tokenwrap(collocation_strings, separator="; "))
466
+
467
+ def count(self, word):
468
+ """
469
+ Count the number of times this word appears in the text.
470
+ """
471
+ return self.tokens.count(word)
472
+
473
+ def index(self, word):
474
+ """
475
+ Find the index of the first occurrence of the word in the text.
476
+ """
477
+ return self.tokens.index(word)
478
+
479
+ def readability(self, method):
480
+ # code from nltk_contrib.readability
481
+ raise NotImplementedError
482
+
483
+ def similar(self, word, num=20):
484
+ """
485
+ Distributional similarity: find other words which appear in the
486
+ same contexts as the specified word; list most similar words first.
487
+
488
+ :param word: The word used to seed the similarity search
489
+ :type word: str
490
+ :param num: The number of words to generate (default=20)
491
+ :type num: int
492
+ :seealso: ContextIndex.similar_words()
493
+ """
494
+ if "_word_context_index" not in self.__dict__:
495
+ # print('Building word-context index...')
496
+ self._word_context_index = ContextIndex(
497
+ self.tokens, filter=lambda x: x.isalpha(), key=lambda s: s.lower()
498
+ )
499
+
500
+ # words = self._word_context_index.similar_words(word, num)
501
+
502
+ word = word.lower()
503
+ wci = self._word_context_index._word_to_contexts
504
+ if word in wci.conditions():
505
+ contexts = set(wci[word])
506
+ fd = Counter(
507
+ w
508
+ for w in wci.conditions()
509
+ for c in wci[w]
510
+ if c in contexts and not w == word
511
+ )
512
+ words = [w for w, _ in fd.most_common(num)]
513
+ print(tokenwrap(words))
514
+ else:
515
+ print("No matches")
516
+
517
+ def common_contexts(self, words, num=20):
518
+ """
519
+ Find contexts where the specified words appear; list
520
+ most frequent common contexts first.
521
+
522
+ :param words: The words used to seed the similarity search
523
+ :type words: str
524
+ :param num: The number of words to generate (default=20)
525
+ :type num: int
526
+ :seealso: ContextIndex.common_contexts()
527
+ """
528
+ if "_word_context_index" not in self.__dict__:
529
+ # print('Building word-context index...')
530
+ self._word_context_index = ContextIndex(
531
+ self.tokens, key=lambda s: s.lower()
532
+ )
533
+
534
+ try:
535
+ fd = self._word_context_index.common_contexts(words, True)
536
+ if not fd:
537
+ print("No common contexts were found")
538
+ else:
539
+ ranked_contexts = [w for w, _ in fd.most_common(num)]
540
+ print(tokenwrap(w1 + "_" + w2 for w1, w2 in ranked_contexts))
541
+
542
+ except ValueError as e:
543
+ print(e)
544
+
545
+ def dispersion_plot(self, words):
546
+ """
547
+ Produce a plot showing the distribution of the words through the text.
548
+ Requires matplotlib to be installed.
549
+
550
+ :param words: The words to be plotted
551
+ :type words: list(str)
552
+ :seealso: nltk.draw.dispersion_plot()
553
+ """
554
+ from nltk.draw import dispersion_plot
555
+
556
+ dispersion_plot(self, words)
557
+
558
+ def _train_default_ngram_lm(self, tokenized_sents, n=3):
559
+ train_data, padded_sents = padded_everygram_pipeline(n, tokenized_sents)
560
+ model = MLE(order=n)
561
+ model.fit(train_data, padded_sents)
562
+ return model
563
+
564
+ def generate(self, length=100, text_seed=None, random_seed=42):
565
+ """
566
+ Print random text, generated using a trigram language model.
567
+ See also `help(nltk.lm)`.
568
+
569
+ :param length: The length of text to generate (default=100)
570
+ :type length: int
571
+
572
+ :param text_seed: Generation can be conditioned on preceding context.
573
+ :type text_seed: list(str)
574
+
575
+ :param random_seed: A random seed or an instance of `random.Random`. If provided,
576
+ makes the random sampling part of generation reproducible. (default=42)
577
+ :type random_seed: int
578
+ """
579
+ # Create the model when using it the first time.
580
+ self._tokenized_sents = [
581
+ sent.split(" ") for sent in sent_tokenize(" ".join(self.tokens))
582
+ ]
583
+ if not hasattr(self, "_trigram_model"):
584
+ print("Building ngram index...", file=sys.stderr)
585
+ self._trigram_model = self._train_default_ngram_lm(
586
+ self._tokenized_sents, n=3
587
+ )
588
+
589
+ generated_tokens = []
590
+
591
+ assert length > 0, "The `length` must be more than 0."
592
+ while len(generated_tokens) < length:
593
+ for idx, token in enumerate(
594
+ self._trigram_model.generate(
595
+ length, text_seed=text_seed, random_seed=random_seed
596
+ )
597
+ ):
598
+ if token == "<s>":
599
+ continue
600
+ if token == "</s>":
601
+ break
602
+ generated_tokens.append(token)
603
+ random_seed += 1
604
+
605
+ prefix = " ".join(text_seed) + " " if text_seed else ""
606
+ output_str = prefix + tokenwrap(generated_tokens[:length])
607
+ print(output_str)
608
+ return output_str
609
+
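A hedged usage sketch of ``generate()`` above. It assumes the ``punkt`` tokenizer data has been downloaded (``sent_tokenize`` is used to build the training sentences), and with a toy corpus the generated text will largely echo the training material:

from nltk.text import Text

tokens = ("the cat sat on the mat . the dog sat on the rug . "
          "the cat saw the dog .").split()
text = Text(tokens)

# Trains a trigram MLE model on first use, then prints and returns the string.
generated = text.generate(length=10, random_seed=3)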
610
+ def plot(self, *args):
611
+ """
612
+ See documentation for FreqDist.plot()
613
+ :seealso: nltk.prob.FreqDist.plot()
614
+ """
615
+ return self.vocab().plot(*args)
616
+
617
+ def vocab(self):
618
+ """
619
+ :seealso: nltk.prob.FreqDist
620
+ """
621
+ if "_vocab" not in self.__dict__:
622
+ # print("Building vocabulary index...")
623
+ self._vocab = FreqDist(self)
624
+ return self._vocab
625
+
626
+ def findall(self, regexp):
627
+ """
628
+ Find instances of the regular expression in the text.
629
+ The text is a list of tokens, and a regexp pattern to match
630
+ a single token must be surrounded by angle brackets. E.g.
631
+
632
+ >>> from nltk.book import text1, text5, text9
633
+ >>> text5.findall("<.*><.*><bro>")
634
+ you rule bro; telling you bro; u twizted bro
635
+ >>> text1.findall("<a>(<.*>)<man>")
636
+ monied; nervous; dangerous; white; white; white; pious; queer; good;
637
+ mature; white; Cape; great; wise; wise; butterless; white; fiendish;
638
+ pale; furious; better; certain; complete; dismasted; younger; brave;
639
+ brave; brave; brave
640
+ >>> text9.findall("<th.*>{3,}")
641
+ thread through those; the thought that; that the thing; the thing
642
+ that; that that thing; through these than through; them that the;
643
+ through the thick; them that they; thought that the
644
+
645
+ :param regexp: A regular expression
646
+ :type regexp: str
647
+ """
648
+
649
+ if "_token_searcher" not in self.__dict__:
650
+ self._token_searcher = TokenSearcher(self)
651
+
652
+ hits = self._token_searcher.findall(regexp)
653
+ hits = [" ".join(h) for h in hits]
654
+ print(tokenwrap(hits, "; "))
655
+
656
+ # ////////////////////////////////////////////////////////////
657
+ # Helper Methods
658
+ # ////////////////////////////////////////////////////////////
659
+
660
+ _CONTEXT_RE = re.compile(r"\w+|[\.\!\?]")
661
+
662
+ def _context(self, tokens, i):
663
+ """
664
+ One left & one right token, both case-normalized. Skip over
665
+ non-sentence-final punctuation. Used by the ``ContextIndex``
666
+ that is created for ``similar()`` and ``common_contexts()``.
667
+ """
668
+ # Left context
669
+ j = i - 1
670
+ while j >= 0 and not self._CONTEXT_RE.match(tokens[j]):
671
+ j -= 1
672
+ left = tokens[j] if j != 0 else "*START*"
673
+
674
+ # Right context
675
+ j = i + 1
676
+ while j < len(tokens) and not self._CONTEXT_RE.match(tokens[j]):
677
+ j += 1
678
+ right = tokens[j] if j != len(tokens) else "*END*"
679
+
680
+ return (left, right)
681
+
682
+ # ////////////////////////////////////////////////////////////
683
+ # String Display
684
+ # ////////////////////////////////////////////////////////////
685
+
686
+ def __str__(self):
687
+ return "<Text: %s>" % self.name
688
+
689
+ def __repr__(self):
690
+ return "<Text: %s>" % self.name
691
+
692
+
693
+ # Prototype only; this approach will be slow to load
694
+ class TextCollection(Text):
695
+ """A collection of texts, which can be loaded with list of texts, or
696
+ with a corpus consisting of one or more texts, and which supports
697
+ counting, concordancing, collocation discovery, etc. Initialize a
698
+ TextCollection as follows:
699
+
700
+ >>> import nltk.corpus
701
+ >>> from nltk.text import TextCollection
702
+ >>> from nltk.book import text1, text2, text3
703
+ >>> gutenberg = TextCollection(nltk.corpus.gutenberg)
704
+ >>> mytexts = TextCollection([text1, text2, text3])
705
+
706
+ Iterating over a TextCollection produces all the tokens of all the
707
+ texts in order.
708
+ """
709
+
710
+ def __init__(self, source):
711
+ if hasattr(source, "words"): # bridge to the text corpus reader
712
+ source = [source.words(f) for f in source.fileids()]
713
+
714
+ self._texts = source
715
+ Text.__init__(self, LazyConcatenation(source))
716
+ self._idf_cache = {}
717
+
718
+ def tf(self, term, text):
719
+ """The frequency of the term in text."""
720
+ return text.count(term) / len(text)
721
+
722
+ def idf(self, term):
723
+ """The number of texts in the corpus divided by the
724
+ number of texts that the term appears in.
725
+ If a term does not appear in the corpus, 0.0 is returned."""
726
+ # idf values are cached for performance.
727
+ idf = self._idf_cache.get(term)
728
+ if idf is None:
729
+ matches = len([True for text in self._texts if term in text])
730
+ if len(self._texts) == 0:
731
+ raise ValueError("IDF undefined for empty document collection")
732
+ idf = log(len(self._texts) / matches) if matches else 0.0
733
+ self._idf_cache[term] = idf
734
+ return idf
735
+
736
+ def tf_idf(self, term, text):
737
+ return self.tf(term, text) * self.idf(term)
738
+
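A small worked example of the tf/idf methods above, using plain token lists as the texts (purely illustrative):

from nltk.text import TextCollection

docs = [["a", "b", "a"], ["a", "c"], ["b", "c", "c"]]
tc = TextCollection(docs)

tc.tf("a", docs[0])       # 2/3: term count divided by document length
tc.idf("a")               # log(3/2): 3 texts, and 'a' appears in 2 of them
tc.tf_idf("a", docs[0])   # (2/3) * log(3/2)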
739
+
740
+ def demo():
741
+ from nltk.corpus import brown
742
+
743
+ text = Text(brown.words(categories="news"))
744
+ print(text)
745
+ print()
746
+ print("Concordance:")
747
+ text.concordance("news")
748
+ print()
749
+ print("Distributionally similar words:")
750
+ text.similar("news")
751
+ print()
752
+ print("Collocations:")
753
+ text.collocations()
754
+ print()
755
+ # print("Automatically generated text:")
756
+ # text.generate()
757
+ # print()
758
+ print("Dispersion plot:")
759
+ text.dispersion_plot(["news", "report", "said", "announced"])
760
+ print()
761
+ print("Vocabulary plot:")
762
+ text.plot(50)
763
+ print()
764
+ print("Indexing:")
765
+ print("text[3]:", text[3])
766
+ print("text[3:5]:", text[3:5])
767
+ print("text.vocab()['news']:", text.vocab()["news"])
768
+
769
+
770
+ if __name__ == "__main__":
771
+ demo()
772
+
773
+ __all__ = [
774
+ "ContextIndex",
775
+ "ConcordanceIndex",
776
+ "TokenSearcher",
777
+ "Text",
778
+ "TextCollection",
779
+ ]
llmeval-env/lib/python3.10/site-packages/nltk/tgrep.py ADDED
@@ -0,0 +1,1039 @@
1
+ #!/usr/bin/env python
2
+ #
3
+ # Natural Language Toolkit: TGrep search
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # Author: Will Roberts <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ ============================================
12
+ TGrep search implementation for NLTK trees
13
+ ============================================
14
+
15
+ This module supports TGrep2 syntax for matching parts of NLTK Trees.
16
+ Note that many tgrep operators require the tree passed to be a
17
+ ``ParentedTree``.
18
+
19
+ External links:
20
+
21
+ - `Tgrep tutorial <https://www.stanford.edu/dept/linguistics/corpora/cas-tut-tgrep.html>`_
22
+ - `Tgrep2 manual <http://tedlab.mit.edu/~dr/Tgrep2/tgrep2.pdf>`_
23
+ - `Tgrep2 source <http://tedlab.mit.edu/~dr/Tgrep2/>`_
24
+
25
+ Usage
26
+ =====
27
+
28
+ >>> from nltk.tree import ParentedTree
29
+ >>> from nltk.tgrep import tgrep_nodes, tgrep_positions
30
+ >>> tree = ParentedTree.fromstring('(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))')
31
+ >>> list(tgrep_nodes('NN', [tree]))
32
+ [[ParentedTree('NN', ['dog']), ParentedTree('NN', ['cat'])]]
33
+ >>> list(tgrep_positions('NN', [tree]))
34
+ [[(0, 2), (2, 1)]]
35
+ >>> list(tgrep_nodes('DT', [tree]))
36
+ [[ParentedTree('DT', ['the']), ParentedTree('DT', ['a'])]]
37
+ >>> list(tgrep_nodes('DT $ JJ', [tree]))
38
+ [[ParentedTree('DT', ['the'])]]
39
+
40
+ This implementation adds syntax to select nodes based on their NLTK
41
+ tree position. This syntax is ``N`` plus a Python tuple representing
42
+ the tree position. For instance, ``N()``, ``N(0,)``, ``N(0,0)`` are
43
+ valid node selectors. Example:
44
+
45
+ >>> tree = ParentedTree.fromstring('(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))')
46
+ >>> tree[0,0]
47
+ ParentedTree('DT', ['the'])
48
+ >>> tree[0,0].treeposition()
49
+ (0, 0)
50
+ >>> list(tgrep_nodes('N(0,0)', [tree]))
51
+ [[ParentedTree('DT', ['the'])]]
52
+
53
+ Caveats:
54
+ ========
55
+
56
+ - Link modifiers: "?" and "=" are not implemented.
57
+ - Tgrep compatibility: Using "@" for "!", "{" for "<", "}" for ">" are
58
+ not implemented.
59
+ - The "=" and "~" links are not implemented.
60
+
61
+ Known Issues:
62
+ =============
63
+
64
+ - There are some issues with link relations involving leaf nodes
65
+ (which are represented as bare strings in NLTK trees). For
66
+ instance, consider the tree::
67
+
68
+ (S (A x))
69
+
70
+ The search string ``* !>> S`` should select all nodes which are not
71
+ dominated in some way by an ``S`` node (i.e., all nodes which are
72
+ not descendants of an ``S``). Clearly, in this tree, the only node
73
+ which fulfills this criterion is the top node (since it is not
74
+ dominated by anything). However, the code here will find both the
75
+ top node and the leaf node ``x``. This is because we cannot recover
76
+ the parent of the leaf, since it is stored as a bare string.
77
+
78
+ A possible workaround, when performing this kind of search, would be
79
+ to filter out all leaf nodes.
80
+
81
+ Implementation notes
82
+ ====================
83
+
84
+ This implementation is (somewhat awkwardly) based on lambda functions
85
+ which are predicates on a node. A predicate is a function which is
86
+ either True or False; using a predicate function, we can identify sets
87
+ of nodes with particular properties. A predicate function, could, for
88
+ instance, return True only if a particular node has a label matching a
89
+ particular regular expression, and has a daughter node which has no
90
+ sisters. Because tgrep2 search strings can do things statefully (such
91
+ as substituting in macros, and binding nodes with node labels), the
92
+ actual predicate function is declared with three arguments::
93
+
94
+ pred = lambda n, m, l: True  # some logic here
95
+
96
+ ``n``
97
+ is a node in a tree; this argument must always be given
98
+
99
+ ``m``
100
+ contains a dictionary, mapping macro names onto predicate functions
101
+
102
+ ``l``
103
+ is a dictionary to map node labels onto nodes in the tree
104
+
105
+ ``m`` and ``l`` are declared to default to ``None``, and so need not be
106
+ specified in a call to a predicate. Predicates which call other
107
+ predicates must always pass the value of these arguments on. The
108
+ top-level predicate (constructed by ``_tgrep_exprs_action``) binds the
109
+ macro definitions to ``m`` and initialises ``l`` to an empty dictionary.
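+
+ As an illustration only (not part of the public API), a hand-written
+ predicate with this signature that matches nodes labelled ``NP`` could
+ be written as::
+
+     is_np = lambda n, m=None, l=None: _tgrep_node_literal_value(n) == 'NP'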
110
+ """
111
+
112
+ import functools
113
+ import re
114
+
115
+ try:
116
+ import pyparsing
117
+ except ImportError:
118
+ print("Warning: nltk.tgrep will not work without the `pyparsing` package")
119
+ print("installed.")
120
+
121
+ import nltk.tree
122
+
123
+
124
+ class TgrepException(Exception):
125
+ """Tgrep exception type."""
126
+
127
+ pass
128
+
129
+
130
+ def ancestors(node):
131
+ """
132
+ Returns the list of all nodes dominating the given tree node.
133
+ This method will not work with leaf nodes, since there is no way
134
+ to recover the parent.
135
+ """
136
+ results = []
137
+ try:
138
+ current = node.parent()
139
+ except AttributeError:
140
+ # if node is a leaf, we cannot retrieve its parent
141
+ return results
142
+ while current:
143
+ results.append(current)
144
+ current = current.parent()
145
+ return results
146
+
147
+
148
+ def unique_ancestors(node):
149
+ """
150
+ Returns the list of all nodes dominating the given node, where
151
+ there is only a single path of descent.
152
+ """
153
+ results = []
154
+ try:
155
+ current = node.parent()
156
+ except AttributeError:
157
+ # if node is a leaf, we cannot retrieve its parent
158
+ return results
159
+ while current and len(current) == 1:
160
+ results.append(current)
161
+ current = current.parent()
162
+ return results
163
+
164
+
165
+ def _descendants(node):
166
+ """
167
+ Returns the list of all nodes which are descended from the given
168
+ tree node in some way.
169
+ """
170
+ try:
171
+ treepos = node.treepositions()
172
+ except AttributeError:
173
+ return []
174
+ return [node[x] for x in treepos[1:]]
175
+
176
+
177
+ def _leftmost_descendants(node):
178
+ """
179
+ Returns the set of all nodes descended in some way through
180
+ left branches from this node.
181
+ """
182
+ try:
183
+ treepos = node.treepositions()
184
+ except AttributeError:
185
+ return []
186
+ return [node[x] for x in treepos[1:] if all(y == 0 for y in x)]
187
+
188
+
189
+ def _rightmost_descendants(node):
190
+ """
191
+ Returns the set of all nodes descended in some way through
192
+ right branches from this node.
193
+ """
194
+ try:
195
+ rightmost_leaf = max(node.treepositions())
196
+ except AttributeError:
197
+ return []
198
+ return [node[rightmost_leaf[:i]] for i in range(1, len(rightmost_leaf) + 1)]
199
+
200
+
201
+ def _istree(obj):
202
+ """Predicate to check whether `obj` is a nltk.tree.Tree."""
203
+ return isinstance(obj, nltk.tree.Tree)
204
+
205
+
206
+ def _unique_descendants(node):
207
+ """
208
+ Returns the list of all nodes descended from the given node, where
209
+ there is only a single path of descent.
210
+ """
211
+ results = []
212
+ current = node
213
+ while current and _istree(current) and len(current) == 1:
214
+ current = current[0]
215
+ results.append(current)
216
+ return results
217
+
218
+
219
+ def _before(node):
220
+ """
221
+ Returns the set of all nodes that are before the given node.
222
+ """
223
+ try:
224
+ pos = node.treeposition()
225
+ tree = node.root()
226
+ except AttributeError:
227
+ return []
228
+ return [tree[x] for x in tree.treepositions() if x[: len(pos)] < pos[: len(x)]]
229
+
230
+
231
+ def _immediately_before(node):
232
+ """
233
+ Returns the set of all nodes that are immediately before the given
234
+ node.
235
+
236
+ Tree node A immediately precedes node B if the last terminal
237
+ symbol (word) produced by A immediately precedes the first
238
+ terminal symbol produced by B.
239
+ """
240
+ try:
241
+ pos = node.treeposition()
242
+ tree = node.root()
243
+ except AttributeError:
244
+ return []
245
+ # go "upwards" from pos until there is a place we can go to the left
246
+ idx = len(pos) - 1
247
+ while 0 <= idx and pos[idx] == 0:
248
+ idx -= 1
249
+ if idx < 0:
250
+ return []
251
+ pos = list(pos[: idx + 1])
252
+ pos[-1] -= 1
253
+ before = tree[pos]
254
+ return [before] + _rightmost_descendants(before)
255
+
256
+
257
+ def _after(node):
258
+ """
259
+ Returns the set of all nodes that are after the given node.
260
+ """
261
+ try:
262
+ pos = node.treeposition()
263
+ tree = node.root()
264
+ except AttributeError:
265
+ return []
266
+ return [tree[x] for x in tree.treepositions() if x[: len(pos)] > pos[: len(x)]]
267
+
268
+
269
+ def _immediately_after(node):
270
+ """
271
+ Returns the set of all nodes that are immediately after the given
272
+ node.
273
+
274
+ Tree node A immediately follows node B if the first terminal
275
+ symbol (word) produced by A immediately follows the last
276
+ terminal symbol produced by B.
277
+ """
278
+ try:
279
+ pos = node.treeposition()
280
+ tree = node.root()
281
+ current = node.parent()
282
+ except AttributeError:
283
+ return []
284
+ # go "upwards" from pos until there is a place we can go to the
285
+ # right
286
+ idx = len(pos) - 1
287
+ while 0 <= idx and pos[idx] == len(current) - 1:
288
+ idx -= 1
289
+ current = current.parent()
290
+ if idx < 0:
291
+ return []
292
+ pos = list(pos[: idx + 1])
293
+ pos[-1] += 1
294
+ after = tree[pos]
295
+ return [after] + _leftmost_descendants(after)
296
+
297
+
298
+ def _tgrep_node_literal_value(node):
299
+ """
300
+ Gets the string value of a given parse tree node, for comparison
301
+ using the tgrep node literal predicates.
302
+ """
303
+ return node.label() if _istree(node) else str(node)
304
+
305
+
306
+ def _tgrep_macro_use_action(_s, _l, tokens):
307
+ """
308
+ Builds a lambda function which looks up the macro name used.
309
+ """
310
+ assert len(tokens) == 1
311
+ assert tokens[0][0] == "@"
312
+ macro_name = tokens[0][1:]
313
+
314
+ def macro_use(n, m=None, l=None):
315
+ if m is None or macro_name not in m:
316
+ raise TgrepException(f"macro {macro_name} not defined")
317
+ return m[macro_name](n, m, l)
318
+
319
+ return macro_use
320
+
321
+
322
+ def _tgrep_node_action(_s, _l, tokens):
323
+ """
324
+ Builds a lambda function representing a predicate on a tree node
325
+ depending on the name of its node.
326
+ """
327
+ if tokens[0] == "'":
328
+ # strip initial apostrophe (tgrep2 print command)
329
+ tokens = tokens[1:]
330
+ if len(tokens) > 1:
331
+ # disjunctive definition of a node name
332
+ assert list(set(tokens[1::2])) == ["|"]
333
+ # recursively call self to interpret each node name definition
334
+ tokens = [_tgrep_node_action(None, None, [node]) for node in tokens[::2]]
335
+ # capture tokens and return the disjunction
336
+ return (lambda t: lambda n, m=None, l=None: any(f(n, m, l) for f in t))(tokens)
337
+ else:
338
+ if hasattr(tokens[0], "__call__"):
339
+ # this is a previously interpreted parenthetical node
340
+ # definition (lambda function)
341
+ return tokens[0]
342
+ elif tokens[0] == "*" or tokens[0] == "__":
343
+ return lambda n, m=None, l=None: True
344
+ elif tokens[0].startswith('"'):
345
+ assert tokens[0].endswith('"')
346
+ node_lit = tokens[0][1:-1].replace('\\"', '"').replace("\\\\", "\\")
347
+ return (
348
+ lambda s: lambda n, m=None, l=None: _tgrep_node_literal_value(n) == s
349
+ )(node_lit)
350
+ elif tokens[0].startswith("/"):
351
+ assert tokens[0].endswith("/")
352
+ node_lit = tokens[0][1:-1]
353
+ return (
354
+ lambda r: lambda n, m=None, l=None: r.search(
355
+ _tgrep_node_literal_value(n)
356
+ )
357
+ )(re.compile(node_lit))
358
+ elif tokens[0].startswith("i@"):
359
+ node_func = _tgrep_node_action(_s, _l, [tokens[0][2:].lower()])
360
+ return (
361
+ lambda f: lambda n, m=None, l=None: f(
362
+ _tgrep_node_literal_value(n).lower()
363
+ )
364
+ )(node_func)
365
+ else:
366
+ return (
367
+ lambda s: lambda n, m=None, l=None: _tgrep_node_literal_value(n) == s
368
+ )(tokens[0])
369
+
370
+
371
+ def _tgrep_parens_action(_s, _l, tokens):
372
+ """
373
+ Builds a lambda function representing a predicate on a tree node
374
+ from a parenthetical notation.
375
+ """
376
+ assert len(tokens) == 3
377
+ assert tokens[0] == "("
378
+ assert tokens[2] == ")"
379
+ return tokens[1]
380
+
381
+
382
+ def _tgrep_nltk_tree_pos_action(_s, _l, tokens):
383
+ """
384
+ Builds a lambda function representing a predicate on a tree node
385
+ which returns true if the node is located at a specific tree
386
+ position.
387
+ """
388
+ # recover the tuple from the parsed string
389
+ node_tree_position = tuple(int(x) for x in tokens if x.isdigit())
390
+ # capture the node's tree position
391
+ return (
392
+ lambda i: lambda n, m=None, l=None: (
393
+ hasattr(n, "treeposition") and n.treeposition() == i
394
+ )
395
+ )(node_tree_position)
396
+
397
+
398
+ def _tgrep_relation_action(_s, _l, tokens):
399
+ """
400
+ Builds a lambda function representing a predicate on a tree node
401
+ depending on its relation to other nodes in the tree.
402
+ """
403
+ # process negation first if needed
404
+ negated = False
405
+ if tokens[0] == "!":
406
+ negated = True
407
+ tokens = tokens[1:]
408
+ if tokens[0] == "[":
409
+ # process square-bracketed relation expressions
410
+ assert len(tokens) == 3
411
+ assert tokens[2] == "]"
412
+ retval = tokens[1]
413
+ else:
414
+ # process operator-node relation expressions
415
+ assert len(tokens) == 2
416
+ operator, predicate = tokens
417
+ # A < B A is the parent of (immediately dominates) B.
418
+ if operator == "<":
419
+ retval = lambda n, m=None, l=None: (
420
+ _istree(n) and any(predicate(x, m, l) for x in n)
421
+ )
422
+ # A > B A is the child of B.
423
+ elif operator == ">":
424
+ retval = lambda n, m=None, l=None: (
425
+ hasattr(n, "parent")
426
+ and bool(n.parent())
427
+ and predicate(n.parent(), m, l)
428
+ )
429
+ # A <, B Synonymous with A <1 B.
430
+ elif operator == "<," or operator == "<1":
431
+ retval = lambda n, m=None, l=None: (
432
+ _istree(n) and bool(list(n)) and predicate(n[0], m, l)
433
+ )
434
+ # A >, B Synonymous with A >1 B.
435
+ elif operator == ">," or operator == ">1":
436
+ retval = lambda n, m=None, l=None: (
437
+ hasattr(n, "parent")
438
+ and bool(n.parent())
439
+ and (n is n.parent()[0])
440
+ and predicate(n.parent(), m, l)
441
+ )
442
+ # A <N B B is the Nth child of A (the first child is <1).
443
+ elif operator[0] == "<" and operator[1:].isdigit():
444
+ idx = int(operator[1:])
445
+ # capture the index parameter
446
+ retval = (
447
+ lambda i: lambda n, m=None, l=None: (
448
+ _istree(n)
449
+ and bool(list(n))
450
+ and 0 <= i < len(n)
451
+ and predicate(n[i], m, l)
452
+ )
453
+ )(idx - 1)
454
+ # A >N B A is the Nth child of B (the first child is >1).
455
+ elif operator[0] == ">" and operator[1:].isdigit():
456
+ idx = int(operator[1:])
457
+ # capture the index parameter
458
+ retval = (
459
+ lambda i: lambda n, m=None, l=None: (
460
+ hasattr(n, "parent")
461
+ and bool(n.parent())
462
+ and 0 <= i < len(n.parent())
463
+ and (n is n.parent()[i])
464
+ and predicate(n.parent(), m, l)
465
+ )
466
+ )(idx - 1)
467
+ # A <' B B is the last child of A (also synonymous with A <-1 B).
468
+ # A <- B B is the last child of A (synonymous with A <-1 B).
469
+ elif operator == "<'" or operator == "<-" or operator == "<-1":
470
+ retval = lambda n, m=None, l=None: (
471
+ _istree(n) and bool(list(n)) and predicate(n[-1], m, l)
472
+ )
473
+ # A >' B A is the last child of B (also synonymous with A >-1 B).
474
+ # A >- B A is the last child of B (synonymous with A >-1 B).
475
+ elif operator == ">'" or operator == ">-" or operator == ">-1":
476
+ retval = lambda n, m=None, l=None: (
477
+ hasattr(n, "parent")
478
+ and bool(n.parent())
479
+ and (n is n.parent()[-1])
480
+ and predicate(n.parent(), m, l)
481
+ )
482
+ # A <-N B B is the N th-to-last child of A (the last child is <-1).
483
+ elif operator[:2] == "<-" and operator[2:].isdigit():
484
+ idx = -int(operator[2:])
485
+ # capture the index parameter
486
+ retval = (
487
+ lambda i: lambda n, m=None, l=None: (
488
+ _istree(n)
489
+ and bool(list(n))
490
+ and 0 <= (i + len(n)) < len(n)
491
+ and predicate(n[i + len(n)], m, l)
492
+ )
493
+ )(idx)
494
+ # A >-N B A is the N th-to-last child of B (the last child is >-1).
495
+ elif operator[:2] == ">-" and operator[2:].isdigit():
496
+ idx = -int(operator[2:])
497
+ # capture the index parameter
498
+ retval = (
499
+ lambda i: lambda n, m=None, l=None: (
500
+ hasattr(n, "parent")
501
+ and bool(n.parent())
502
+ and 0 <= (i + len(n.parent())) < len(n.parent())
503
+ and (n is n.parent()[i + len(n.parent())])
504
+ and predicate(n.parent(), m, l)
505
+ )
506
+ )(idx)
507
+ # A <: B B is the only child of A
508
+ elif operator == "<:":
509
+ retval = lambda n, m=None, l=None: (
510
+ _istree(n) and len(n) == 1 and predicate(n[0], m, l)
511
+ )
512
+ # A >: B A is the only child of B.
513
+ elif operator == ">:":
514
+ retval = lambda n, m=None, l=None: (
515
+ hasattr(n, "parent")
516
+ and bool(n.parent())
517
+ and len(n.parent()) == 1
518
+ and predicate(n.parent(), m, l)
519
+ )
520
+ # A << B A dominates B (A is an ancestor of B).
521
+ elif operator == "<<":
522
+ retval = lambda n, m=None, l=None: (
523
+ _istree(n) and any(predicate(x, m, l) for x in _descendants(n))
524
+ )
525
+ # A >> B A is dominated by B (A is a descendant of B).
526
+ elif operator == ">>":
527
+ retval = lambda n, m=None, l=None: any(
528
+ predicate(x, m, l) for x in ancestors(n)
529
+ )
530
+ # A <<, B B is a left-most descendant of A.
531
+ elif operator == "<<," or operator == "<<1":
532
+ retval = lambda n, m=None, l=None: (
533
+ _istree(n) and any(predicate(x, m, l) for x in _leftmost_descendants(n))
534
+ )
535
+ # A >>, B A is a left-most descendant of B.
536
+ elif operator == ">>,":
537
+ retval = lambda n, m=None, l=None: any(
538
+ (predicate(x, m, l) and n in _leftmost_descendants(x))
539
+ for x in ancestors(n)
540
+ )
541
+ # A <<' B B is a right-most descendant of A.
542
+ elif operator == "<<'":
543
+ retval = lambda n, m=None, l=None: (
544
+ _istree(n)
545
+ and any(predicate(x, m, l) for x in _rightmost_descendants(n))
546
+ )
547
+ # A >>' B A is a right-most descendant of B.
548
+ elif operator == ">>'":
549
+ retval = lambda n, m=None, l=None: any(
550
+ (predicate(x, m, l) and n in _rightmost_descendants(x))
551
+ for x in ancestors(n)
552
+ )
553
+ # A <<: B There is a single path of descent from A and B is on it.
554
+ elif operator == "<<:":
555
+ retval = lambda n, m=None, l=None: (
556
+ _istree(n) and any(predicate(x, m, l) for x in _unique_descendants(n))
557
+ )
558
+ # A >>: B There is a single path of descent from B and A is on it.
559
+ elif operator == ">>:":
560
+ retval = lambda n, m=None, l=None: any(
561
+ predicate(x, m, l) for x in unique_ancestors(n)
562
+ )
563
+ # A . B A immediately precedes B.
564
+ elif operator == ".":
565
+ retval = lambda n, m=None, l=None: any(
566
+ predicate(x, m, l) for x in _immediately_after(n)
567
+ )
568
+ # A , B A immediately follows B.
569
+ elif operator == ",":
570
+ retval = lambda n, m=None, l=None: any(
571
+ predicate(x, m, l) for x in _immediately_before(n)
572
+ )
573
+ # A .. B A precedes B.
574
+ elif operator == "..":
575
+ retval = lambda n, m=None, l=None: any(
576
+ predicate(x, m, l) for x in _after(n)
577
+ )
578
+ # A ,, B A follows B.
579
+ elif operator == ",,":
580
+ retval = lambda n, m=None, l=None: any(
581
+ predicate(x, m, l) for x in _before(n)
582
+ )
583
+ # A $ B A is a sister of B (and A != B).
584
+ elif operator == "$" or operator == "%":
585
+ retval = lambda n, m=None, l=None: (
586
+ hasattr(n, "parent")
587
+ and bool(n.parent())
588
+ and any(predicate(x, m, l) for x in n.parent() if x is not n)
589
+ )
590
+ # A $. B A is a sister of and immediately precedes B.
591
+ elif operator == "$." or operator == "%.":
592
+ retval = lambda n, m=None, l=None: (
593
+ hasattr(n, "right_sibling")
594
+ and bool(n.right_sibling())
595
+ and predicate(n.right_sibling(), m, l)
596
+ )
597
+ # A $, B A is a sister of and immediately follows B.
598
+ elif operator == "$," or operator == "%,":
599
+ retval = lambda n, m=None, l=None: (
600
+ hasattr(n, "left_sibling")
601
+ and bool(n.left_sibling())
602
+ and predicate(n.left_sibling(), m, l)
603
+ )
604
+ # A $.. B A is a sister of and precedes B.
605
+ elif operator == "$.." or operator == "%..":
606
+ retval = lambda n, m=None, l=None: (
607
+ hasattr(n, "parent")
608
+ and hasattr(n, "parent_index")
609
+ and bool(n.parent())
610
+ and any(predicate(x, m, l) for x in n.parent()[n.parent_index() + 1 :])
611
+ )
612
+ # A $,, B A is a sister of and follows B.
613
+ elif operator == "$,," or operator == "%,,":
614
+ retval = lambda n, m=None, l=None: (
615
+ hasattr(n, "parent")
616
+ and hasattr(n, "parent_index")
617
+ and bool(n.parent())
618
+ and any(predicate(x, m, l) for x in n.parent()[: n.parent_index()])
619
+ )
620
+ else:
621
+ raise TgrepException(f'cannot interpret tgrep operator "{operator}"')
622
+ # now return the built function
623
+ if negated:
624
+ return (lambda r: (lambda n, m=None, l=None: not r(n, m, l)))(retval)
625
+ else:
626
+ return retval
627
+
628
+
629
+ def _tgrep_conjunction_action(_s, _l, tokens, join_char="&"):
630
+ """
631
+ Builds a lambda function representing a predicate on a tree node
632
+ from the conjunction of several other such lambda functions.
633
+
634
+ This is prototypically called for expressions like
635
+ (`tgrep_rel_conjunction`)::
636
+
637
+ < NP & < AP < VP
638
+
639
+ where tokens is a list of predicates representing the relations
640
+ (`< NP`, `< AP`, and `< VP`), possibly with the character `&`
641
+ included (as in the example here).
642
+
643
+ This is also called for expressions like (`tgrep_node_expr2`)::
644
+
645
+ NP < NN
646
+ S=s < /NP/=n : s < /VP/=v : n .. v
647
+
648
+ tokens[0] is a tgrep_expr predicate; tokens[1:] are an (optional)
649
+ list of segmented patterns (`tgrep_expr_labeled`, processed by
650
+ `_tgrep_segmented_pattern_action`).
651
+ """
652
+ # filter out the ampersand
653
+ tokens = [x for x in tokens if x != join_char]
654
+ if len(tokens) == 1:
655
+ return tokens[0]
656
+ else:
657
+ return (
658
+ lambda ts: lambda n, m=None, l=None: all(
659
+ predicate(n, m, l) for predicate in ts
660
+ )
661
+ )(tokens)
662
+
663
+
664
+ def _tgrep_segmented_pattern_action(_s, _l, tokens):
665
+ """
666
+ Builds a lambda function representing a segmented pattern.
667
+
668
+ Called for expressions like (`tgrep_expr_labeled`)::
669
+
670
+ =s .. =v < =n
671
+
672
+ This is a segmented pattern, a tgrep2 expression which begins with
673
+ a node label.
674
+
675
+ The problem is that for segmented_pattern_action (': =v < =s'),
676
+ the first element (in this case, =v) is specifically selected by
677
+ virtue of matching a particular node in the tree; to retrieve
678
+ the node, we need the label, not a lambda function. For node
679
+ labels inside a tgrep_node_expr, we need a lambda function which
680
+ returns true if the node visited is the same as =v.
681
+
682
+ We solve this by creating two copies of a node_label_use in the
683
+ grammar; the label use inside a tgrep_expr_labeled has a separate
684
+ parse action to the pred use inside a node_expr. See
685
+ `_tgrep_node_label_use_action` and
686
+ `_tgrep_node_label_pred_use_action`.
687
+ """
688
+ # tokens[0] is a string containing the node label
689
+ node_label = tokens[0]
690
+ # tokens[1:] is an (optional) list of predicates which must all
691
+ # hold of the bound node
692
+ reln_preds = tokens[1:]
693
+
694
+ def pattern_segment_pred(n, m=None, l=None):
695
+ """This predicate function ignores its node argument."""
696
+ # look up the bound node using its label
697
+ if l is None or node_label not in l:
698
+ raise TgrepException(f"node_label ={node_label} not bound in pattern")
699
+ node = l[node_label]
700
+ # match the relation predicates against the node
701
+ return all(pred(node, m, l) for pred in reln_preds)
702
+
703
+ return pattern_segment_pred
704
+
705
+
706
+ def _tgrep_node_label_use_action(_s, _l, tokens):
707
+ """
708
+ Returns the node label used to begin a tgrep_expr_labeled. See
709
+ `_tgrep_segmented_pattern_action`.
710
+
711
+ Called for expressions like (`tgrep_node_label_use`)::
712
+
713
+ =s
714
+
715
+ when they appear as the first element of a `tgrep_expr_labeled`
716
+ expression (see `_tgrep_segmented_pattern_action`).
717
+
718
+ It returns the node label.
719
+ """
720
+ assert len(tokens) == 1
721
+ assert tokens[0].startswith("=")
722
+ return tokens[0][1:]
723
+
724
+
725
+ def _tgrep_node_label_pred_use_action(_s, _l, tokens):
726
+ """
727
+ Builds a lambda function representing a predicate on a tree node
728
+ which describes the use of a previously bound node label.
729
+
730
+ Called for expressions like (`tgrep_node_label_use_pred`)::
731
+
732
+ =s
733
+
734
+ when they appear inside a tgrep_node_expr (for example, inside a
735
+ relation). The predicate returns true if and only if its node
736
+ argument is identical to the node looked up in the node label
737
+ dictionary using the node's label.
738
+ """
739
+ assert len(tokens) == 1
740
+ assert tokens[0].startswith("=")
741
+ node_label = tokens[0][1:]
742
+
743
+ def node_label_use_pred(n, m=None, l=None):
744
+ # look up the bound node using its label
745
+ if l is None or node_label not in l:
746
+ raise TgrepException(f"node_label ={node_label} not bound in pattern")
747
+ node = l[node_label]
748
+ # truth means the given node is this node
749
+ return n is node
750
+
751
+ return node_label_use_pred
752
+
753
+
754
+ def _tgrep_bind_node_label_action(_s, _l, tokens):
755
+ """
756
+ Builds a lambda function representing a predicate on a tree node
757
+ which can optionally bind a matching node into the tgrep2 string's
758
+ label_dict.
759
+
760
+ Called for expressions like (`tgrep_node_expr2`)::
761
+
762
+ /NP/
763
+ @NP=n
764
+ """
765
+ # tokens[0] is a tgrep_node_expr
766
+ if len(tokens) == 1:
767
+ return tokens[0]
768
+ else:
769
+ # if present, tokens[1] is the character '=', and tokens[2] is
770
+ # a tgrep_node_label, a string value containing the node label
771
+ assert len(tokens) == 3
772
+ assert tokens[1] == "="
773
+ node_pred = tokens[0]
774
+ node_label = tokens[2]
775
+
776
+ def node_label_bind_pred(n, m=None, l=None):
777
+ if node_pred(n, m, l):
778
+ # bind `n` into the dictionary `l`
779
+ if l is None:
780
+ raise TgrepException(
781
+ "cannot bind node_label {}: label_dict is None".format(
782
+ node_label
783
+ )
784
+ )
785
+ l[node_label] = n
786
+ return True
787
+ else:
788
+ return False
789
+
790
+ return node_label_bind_pred
791
+
792
+
793
+ def _tgrep_rel_disjunction_action(_s, _l, tokens):
794
+ """
795
+ Builds a lambda function representing a predicate on a tree node
796
+ from the disjunction of several other such lambda functions.
797
+ """
798
+ # filter out the pipe
799
+ tokens = [x for x in tokens if x != "|"]
800
+ if len(tokens) == 1:
801
+ return tokens[0]
802
+ elif len(tokens) == 2:
803
+ return (lambda a, b: lambda n, m=None, l=None: a(n, m, l) or b(n, m, l))(
804
+ tokens[0], tokens[1]
805
+ )
806
+
807
+
808
+ def _macro_defn_action(_s, _l, tokens):
809
+ """
810
+ Builds a dictionary structure which defines the given macro.
811
+ """
812
+ assert len(tokens) == 3
813
+ assert tokens[0] == "@"
814
+ return {tokens[1]: tokens[2]}
815
+
816
+
817
+ def _tgrep_exprs_action(_s, _l, tokens):
818
+ """
819
+ This is the top-level node in a tgrep2 search string; the
820
+ predicate function it returns binds together all the state of a
821
+ tgrep2 search string.
822
+
823
+ Builds a lambda function representing a predicate on a tree node
824
+ from the disjunction of several tgrep expressions. Also handles
825
+ macro definitions and macro name binding, and node label
826
+ definitions and node label binding.
827
+ """
828
+ if len(tokens) == 1:
829
+ return lambda n, m=None, l=None: tokens[0](n, None, {})
830
+ # filter out all the semicolons
831
+ tokens = [x for x in tokens if x != ";"]
832
+ # collect all macro definitions
833
+ macro_dict = {}
834
+ macro_defs = [tok for tok in tokens if isinstance(tok, dict)]
835
+ for macro_def in macro_defs:
836
+ macro_dict.update(macro_def)
837
+ # collect all tgrep expressions
838
+ tgrep_exprs = [tok for tok in tokens if not isinstance(tok, dict)]
839
+ # create a new scope for the node label dictionary
840
+ def top_level_pred(n, m=macro_dict, l=None):
841
+ label_dict = {}
842
+ # bind macro definitions and OR together all tgrep_exprs
843
+ return any(predicate(n, m, label_dict) for predicate in tgrep_exprs)
844
+
845
+ return top_level_pred
846
+
847
+
848
+ def _build_tgrep_parser(set_parse_actions=True):
849
+ """
850
+ Builds a pyparsing-based parser object for tokenizing and
851
+ interpreting tgrep search strings.
852
+ """
853
+ tgrep_op = pyparsing.Optional("!") + pyparsing.Regex("[$%,.<>][%,.<>0-9-':]*")
854
+ tgrep_qstring = pyparsing.QuotedString(
855
+ quoteChar='"', escChar="\\", unquoteResults=False
856
+ )
857
+ tgrep_node_regex = pyparsing.QuotedString(
858
+ quoteChar="/", escChar="\\", unquoteResults=False
859
+ )
860
+ tgrep_qstring_icase = pyparsing.Regex('i@\\"(?:[^"\\n\\r\\\\]|(?:\\\\.))*\\"')
861
+ tgrep_node_regex_icase = pyparsing.Regex("i@\\/(?:[^/\\n\\r\\\\]|(?:\\\\.))*\\/")
862
+ tgrep_node_literal = pyparsing.Regex("[^][ \r\t\n;:.,&|<>()$!@%'^=]+")
863
+ tgrep_expr = pyparsing.Forward()
864
+ tgrep_relations = pyparsing.Forward()
865
+ tgrep_parens = pyparsing.Literal("(") + tgrep_expr + ")"
866
+ tgrep_nltk_tree_pos = (
867
+ pyparsing.Literal("N(")
868
+ + pyparsing.Optional(
869
+ pyparsing.Word(pyparsing.nums)
870
+ + ","
871
+ + pyparsing.Optional(
872
+ pyparsing.delimitedList(pyparsing.Word(pyparsing.nums), delim=",")
873
+ + pyparsing.Optional(",")
874
+ )
875
+ )
876
+ + ")"
877
+ )
878
+ tgrep_node_label = pyparsing.Regex("[A-Za-z0-9]+")
879
+ tgrep_node_label_use = pyparsing.Combine("=" + tgrep_node_label)
880
+ # see _tgrep_segmented_pattern_action
881
+ tgrep_node_label_use_pred = tgrep_node_label_use.copy()
882
+ macro_name = pyparsing.Regex("[^];:.,&|<>()[$!@%'^=\r\t\n ]+")
883
+ macro_name.setWhitespaceChars("")
884
+ macro_use = pyparsing.Combine("@" + macro_name)
885
+ tgrep_node_expr = (
886
+ tgrep_node_label_use_pred
887
+ | macro_use
888
+ | tgrep_nltk_tree_pos
889
+ | tgrep_qstring_icase
890
+ | tgrep_node_regex_icase
891
+ | tgrep_qstring
892
+ | tgrep_node_regex
893
+ | "*"
894
+ | tgrep_node_literal
895
+ )
896
+ tgrep_node_expr2 = (
897
+ tgrep_node_expr
898
+ + pyparsing.Literal("=").setWhitespaceChars("")
899
+ + tgrep_node_label.copy().setWhitespaceChars("")
900
+ ) | tgrep_node_expr
901
+ tgrep_node = tgrep_parens | (
902
+ pyparsing.Optional("'")
903
+ + tgrep_node_expr2
904
+ + pyparsing.ZeroOrMore("|" + tgrep_node_expr)
905
+ )
906
+ tgrep_brackets = pyparsing.Optional("!") + "[" + tgrep_relations + "]"
907
+ tgrep_relation = tgrep_brackets | (tgrep_op + tgrep_node)
908
+ tgrep_rel_conjunction = pyparsing.Forward()
909
+ tgrep_rel_conjunction << (
910
+ tgrep_relation
911
+ + pyparsing.ZeroOrMore(pyparsing.Optional("&") + tgrep_rel_conjunction)
912
+ )
913
+ tgrep_relations << tgrep_rel_conjunction + pyparsing.ZeroOrMore(
914
+ "|" + tgrep_relations
915
+ )
916
+ tgrep_expr << tgrep_node + pyparsing.Optional(tgrep_relations)
917
+ tgrep_expr_labeled = tgrep_node_label_use + pyparsing.Optional(tgrep_relations)
918
+ tgrep_expr2 = tgrep_expr + pyparsing.ZeroOrMore(":" + tgrep_expr_labeled)
919
+ macro_defn = (
920
+ pyparsing.Literal("@") + pyparsing.White().suppress() + macro_name + tgrep_expr2
921
+ )
922
+ tgrep_exprs = (
923
+ pyparsing.Optional(macro_defn + pyparsing.ZeroOrMore(";" + macro_defn) + ";")
924
+ + tgrep_expr2
925
+ + pyparsing.ZeroOrMore(";" + (macro_defn | tgrep_expr2))
926
+ + pyparsing.ZeroOrMore(";").suppress()
927
+ )
928
+ if set_parse_actions:
929
+ tgrep_node_label_use.setParseAction(_tgrep_node_label_use_action)
930
+ tgrep_node_label_use_pred.setParseAction(_tgrep_node_label_pred_use_action)
931
+ macro_use.setParseAction(_tgrep_macro_use_action)
932
+ tgrep_node.setParseAction(_tgrep_node_action)
933
+ tgrep_node_expr2.setParseAction(_tgrep_bind_node_label_action)
934
+ tgrep_parens.setParseAction(_tgrep_parens_action)
935
+ tgrep_nltk_tree_pos.setParseAction(_tgrep_nltk_tree_pos_action)
936
+ tgrep_relation.setParseAction(_tgrep_relation_action)
937
+ tgrep_rel_conjunction.setParseAction(_tgrep_conjunction_action)
938
+ tgrep_relations.setParseAction(_tgrep_rel_disjunction_action)
939
+ macro_defn.setParseAction(_macro_defn_action)
940
+ # the whole expression is also the conjunction of two
941
+ # predicates: the first node predicate, and the remaining
942
+ # relation predicates
943
+ tgrep_expr.setParseAction(_tgrep_conjunction_action)
944
+ tgrep_expr_labeled.setParseAction(_tgrep_segmented_pattern_action)
945
+ tgrep_expr2.setParseAction(
946
+ functools.partial(_tgrep_conjunction_action, join_char=":")
947
+ )
948
+ tgrep_exprs.setParseAction(_tgrep_exprs_action)
949
+ return tgrep_exprs.ignore("#" + pyparsing.restOfLine)
950
+
951
+
952
+ def tgrep_tokenize(tgrep_string):
953
+ """
954
+ Tokenizes a TGrep search string into separate tokens.
955
+ """
956
+ parser = _build_tgrep_parser(False)
957
+ if isinstance(tgrep_string, bytes):
958
+ tgrep_string = tgrep_string.decode()
959
+ return list(parser.parseString(tgrep_string))
960
+
961
+
962
+ def tgrep_compile(tgrep_string):
963
+ """
964
+ Parses (and tokenizes, if necessary) a TGrep search string into a
965
+ lambda function.
966
+ """
967
+ parser = _build_tgrep_parser(True)
968
+ if isinstance(tgrep_string, bytes):
969
+ tgrep_string = tgrep_string.decode()
970
+ return list(parser.parseString(tgrep_string, parseAll=True))[0]
971
+
972
+
973
+ def treepositions_no_leaves(tree):
974
+ """
975
+ Returns all the tree positions in the given tree which are not
976
+ leaf nodes.
977
+ """
978
+ treepositions = tree.treepositions()
979
+ # leaves are treeposition tuples that are not prefixes of any
980
+ # other treeposition
981
+ prefixes = set()
982
+ for pos in treepositions:
983
+ for length in range(len(pos)):
984
+ prefixes.add(pos[:length])
985
+ return [pos for pos in treepositions if pos in prefixes]
986
+
987
+
988
+ def tgrep_positions(pattern, trees, search_leaves=True):
989
+ """
990
+ Return the tree positions in the trees which match the given pattern.
991
+
992
+ :param pattern: a tgrep search pattern
993
+ :type pattern: str or output of tgrep_compile()
994
+ :param trees: a sequence of NLTK trees (usually ParentedTrees)
995
+ :type trees: iter(ParentedTree) or iter(Tree)
996
+ :param search_leaves: whether to return matching leaf nodes
997
+ :type search_leaves: bool
998
+ :rtype: iter(tree positions)
999
+ """
1000
+
1001
+ if isinstance(pattern, (bytes, str)):
1002
+ pattern = tgrep_compile(pattern)
1003
+
1004
+ for tree in trees:
1005
+ try:
1006
+ if search_leaves:
1007
+ positions = tree.treepositions()
1008
+ else:
1009
+ positions = treepositions_no_leaves(tree)
1010
+ yield [position for position in positions if pattern(tree[position])]
1011
+ except AttributeError:
1012
+ yield []
1013
+
1014
+
1015
+ def tgrep_nodes(pattern, trees, search_leaves=True):
1016
+ """
1017
+ Return the tree nodes in the trees which match the given pattern.
1018
+
1019
+ :param pattern: a tgrep search pattern
1020
+ :type pattern: str or output of tgrep_compile()
1021
+ :param trees: a sequence of NLTK trees (usually ParentedTrees)
1022
+ :type trees: iter(ParentedTree) or iter(Tree)
1023
+ :param search_leaves: whether to return matching leaf nodes
1024
+ :type search_leaves: bool
1025
+ :rtype: iter(tree nodes)
1026
+ """
1027
+
1028
+ if isinstance(pattern, (bytes, str)):
1029
+ pattern = tgrep_compile(pattern)
1030
+
1031
+ for tree in trees:
1032
+ try:
1033
+ if search_leaves:
1034
+ positions = tree.treepositions()
1035
+ else:
1036
+ positions = treepositions_no_leaves(tree)
1037
+ yield [tree[position] for position in positions if pattern(tree[position])]
1038
+ except AttributeError:
1039
+ yield []
llmeval-env/lib/python3.10/site-packages/nltk/toolbox.py ADDED
@@ -0,0 +1,524 @@
1
+ # Natural Language Toolkit: Toolbox Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Greg Aumann <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Module for reading, writing and manipulating
10
+ Toolbox databases and settings files.
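+
+ A minimal usage sketch (mirroring the ``demo()`` function at the end
+ of this module; it assumes the NLTK toolbox sample data has been
+ downloaded)::
+
+     from nltk.data import find
+     from nltk.toolbox import ToolboxData
+
+     lexicon = ToolboxData(find("corpora/toolbox/rotokas.dic")).parse()
+     for field in lexicon.findall("record/lx"):
+         print(field.text)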
11
+ """
12
+
13
+ import codecs
14
+ import re
15
+ from io import StringIO
16
+ from xml.etree.ElementTree import Element, ElementTree, SubElement, TreeBuilder
17
+
18
+ from nltk.data import PathPointer, find
19
+
20
+
21
+ class StandardFormat:
22
+ """
23
+ Class for reading and processing standard format marker files and strings.
24
+ """
25
+
26
+ def __init__(self, filename=None, encoding=None):
27
+ self._encoding = encoding
28
+ if filename is not None:
29
+ self.open(filename)
30
+
31
+ def open(self, sfm_file):
32
+ """
33
+ Open a standard format marker file for sequential reading.
34
+
35
+ :param sfm_file: name of the standard format marker input file
36
+ :type sfm_file: str
37
+ """
38
+ if isinstance(sfm_file, PathPointer):
39
+ self._file = sfm_file.open(self._encoding)
40
+ else:
41
+ self._file = codecs.open(sfm_file, "r", self._encoding)
42
+
43
+ def open_string(self, s):
44
+ """
45
+ Open a standard format marker string for sequential reading.
46
+
47
+ :param s: string to parse as a standard format marker input file
48
+ :type s: str
49
+ """
50
+ self._file = StringIO(s)
51
+
52
+ def raw_fields(self):
53
+ """
54
+ Return an iterator that returns the next field in a (marker, value)
55
+ tuple. Linebreaks and trailing white space are preserved except
56
+ for the final newline in each field.
57
+
58
+ :rtype: iter(tuple(str, str))
59
+ """
60
+ join_string = "\n"
61
+ line_regexp = r"^%s(?:\\(\S+)\s*)?(.*)$"
62
+ # discard a BOM in the first line
63
+ first_line_pat = re.compile(line_regexp % "(?:\xef\xbb\xbf)?")
64
+ line_pat = re.compile(line_regexp % "")
65
+ # need to get first line outside the loop for correct handling
66
+ # of the first marker if it spans multiple lines
67
+ file_iter = iter(self._file)
68
+ # PEP 479, prevent RuntimeError when StopIteration is raised inside generator
69
+ try:
70
+ line = next(file_iter)
71
+ except StopIteration:
72
+ # no more data is available, terminate the generator
73
+ return
74
+ mobj = re.match(first_line_pat, line)
75
+ mkr, line_value = mobj.groups()
76
+ value_lines = [line_value]
77
+ self.line_num = 0
78
+ for line in file_iter:
79
+ self.line_num += 1
80
+ mobj = re.match(line_pat, line)
81
+ line_mkr, line_value = mobj.groups()
82
+ if line_mkr:
83
+ yield (mkr, join_string.join(value_lines))
84
+ mkr = line_mkr
85
+ value_lines = [line_value]
86
+ else:
87
+ value_lines.append(line_value)
88
+ self.line_num += 1
89
+ yield (mkr, join_string.join(value_lines))
90
+
91
+ def fields(
92
+ self,
93
+ strip=True,
94
+ unwrap=True,
95
+ encoding=None,
96
+ errors="strict",
97
+ unicode_fields=None,
98
+ ):
99
+ """
100
+ Return an iterator that returns the next field in a ``(marker, value)``
101
+ tuple, where ``marker`` and ``value`` are unicode strings if an ``encoding``
102
+ was specified in the ``fields()`` method. Otherwise they are non-unicode strings.
103
+
104
+ :param strip: strip trailing whitespace from the last line of each field
105
+ :type strip: bool
106
+ :param unwrap: Convert newlines in a field to spaces.
107
+ :type unwrap: bool
108
+ :param encoding: Name of an encoding to use. If it is specified then
109
+ the ``fields()`` method returns unicode strings rather than non
110
+ unicode strings.
111
+ :type encoding: str or None
112
+ :param errors: Error handling scheme for codec. Same as the ``decode()``
113
+ builtin string method.
114
+ :type errors: str
115
+ :param unicode_fields: Set of marker names whose values are UTF-8 encoded.
116
+ Ignored if encoding is None. If the whole file is UTF-8 encoded set
117
+ ``encoding='utf8'`` and leave ``unicode_fields`` with its default
118
+ value of None.
119
+ :type unicode_fields: sequence
120
+ :rtype: iter(tuple(str, str))
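+
+ A small sketch (not a doctest) using ``open_string()`` defined above::
+
+     sf = StandardFormat()
+     sf.open_string("\\lx kaa\n\\ps V.A\n\\ge gag\n")
+     list(sf.fields())
+     # expected: [('lx', 'kaa'), ('ps', 'V.A'), ('ge', 'gag')]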
121
+ """
122
+ if encoding is None and unicode_fields is not None:
123
+ raise ValueError("unicode_fields is set but not encoding.")
124
+ unwrap_pat = re.compile(r"\n+")
125
+ for mkr, val in self.raw_fields():
126
+ if unwrap:
127
+ val = unwrap_pat.sub(" ", val)
128
+ if strip:
129
+ val = val.rstrip()
130
+ yield (mkr, val)
131
+
132
+ def close(self):
133
+ """Close a previously opened standard format marker file or string."""
134
+ self._file.close()
135
+ try:
136
+ del self.line_num
137
+ except AttributeError:
138
+ pass
139
+
140
+
141
+ class ToolboxData(StandardFormat):
142
+ def parse(self, grammar=None, **kwargs):
143
+ if grammar:
144
+ return self._chunk_parse(grammar=grammar, **kwargs)
145
+ else:
146
+ return self._record_parse(**kwargs)
147
+
148
+ def _record_parse(self, key=None, **kwargs):
149
+ r"""
150
+ Returns an element tree structure corresponding to a toolbox data file with
151
+ all markers at the same level.
152
+
153
+ Thus the following Toolbox database::
154
+ \_sh v3.0 400 Rotokas Dictionary
155
+ \_DateStampHasFourDigitYear
156
+
157
+ \lx kaa
158
+ \ps V.A
159
+ \ge gag
160
+ \gp nek i pas
161
+
162
+ \lx kaa
163
+ \ps V.B
164
+ \ge strangle
165
+ \gp pasim nek
166
+
167
+ after parsing will end up with the same structure (ignoring the extra
168
+ whitespace) as the following XML fragment after being parsed by
169
+ ElementTree::
170
+ <toolbox_data>
171
+ <header>
172
+ <_sh>v3.0 400 Rotokas Dictionary</_sh>
173
+ <_DateStampHasFourDigitYear/>
174
+ </header>
175
+
176
+ <record>
177
+ <lx>kaa</lx>
178
+ <ps>V.A</ps>
179
+ <ge>gag</ge>
180
+ <gp>nek i pas</gp>
181
+ </record>
182
+
183
+ <record>
184
+ <lx>kaa</lx>
185
+ <ps>V.B</ps>
186
+ <ge>strangle</ge>
187
+ <gp>pasim nek</gp>
188
+ </record>
189
+ </toolbox_data>
190
+
191
+ :param key: Name of key marker at the start of each record. If set to
192
+ None (the default value) the first marker that doesn't begin with
193
+ an underscore is assumed to be the key.
194
+ :type key: str
195
+ :param kwargs: Keyword arguments passed to ``StandardFormat.fields()``
196
+ :type kwargs: dict
197
+ :rtype: ElementTree._ElementInterface
198
+ :return: contents of toolbox data divided into header and records
199
+ """
200
+ builder = TreeBuilder()
201
+ builder.start("toolbox_data", {})
202
+ builder.start("header", {})
203
+ in_records = False
204
+ for mkr, value in self.fields(**kwargs):
205
+ if key is None and not in_records and mkr[0] != "_":
206
+ key = mkr
207
+ if mkr == key:
208
+ if in_records:
209
+ builder.end("record")
210
+ else:
211
+ builder.end("header")
212
+ in_records = True
213
+ builder.start("record", {})
214
+ builder.start(mkr, {})
215
+ builder.data(value)
216
+ builder.end(mkr)
217
+ if in_records:
218
+ builder.end("record")
219
+ else:
220
+ builder.end("header")
221
+ builder.end("toolbox_data")
222
+ return builder.close()
223
+
224
+ def _tree2etree(self, parent):
225
+ from nltk.tree import Tree
226
+
227
+ root = Element(parent.label())
228
+ for child in parent:
229
+ if isinstance(child, Tree):
230
+ root.append(self._tree2etree(child))
231
+ else:
232
+ text, tag = child
233
+ e = SubElement(root, tag)
234
+ e.text = text
235
+ return root
236
+
237
+ def _chunk_parse(self, grammar=None, root_label="record", trace=0, **kwargs):
238
+ """
239
+ Returns an element tree structure corresponding to a toolbox data file
240
+ parsed according to the chunk grammar.
241
+
242
+ :type grammar: str
243
+ :param grammar: Contains the chunking rules used to parse the
244
+ database. See ``chunk.RegExp`` for documentation.
245
+ :type root_label: str
246
+ :param root_label: The node value that should be used for the
247
+ top node of the chunk structure.
248
+ :type trace: int
249
+ :param trace: The level of tracing that should be used when
250
+ parsing a text. ``0`` will generate no tracing output;
251
+ ``1`` will generate normal tracing output; and ``2`` or
252
+ higher will generate verbose tracing output.
253
+ :type kwargs: dict
254
+ :param kwargs: Keyword arguments passed to ``toolbox.StandardFormat.fields()``
255
+ :rtype: ElementTree._ElementInterface
256
+ """
257
+ from nltk import chunk
258
+ from nltk.tree import Tree
259
+
260
+ cp = chunk.RegexpParser(grammar, root_label=root_label, trace=trace)
261
+ db = self.parse(**kwargs)
262
+ tb_etree = Element("toolbox_data")
263
+ header = db.find("header")
264
+ tb_etree.append(header)
265
+ for record in db.findall("record"):
266
+ parsed = cp.parse([(elem.text, elem.tag) for elem in record])
267
+ tb_etree.append(self._tree2etree(parsed))
268
+ return tb_etree
269
+
270
+
271
+ _is_value = re.compile(r"\S")
272
+
273
+
274
+ def to_sfm_string(tree, encoding=None, errors="strict", unicode_fields=None):
275
+ """
276
+ Return a string with a standard format representation of the toolbox
277
+ data in tree (tree can be a toolbox database or a single record).
278
+
279
+ :param tree: flat representation of toolbox data (whole database or single record)
280
+ :type tree: ElementTree._ElementInterface
281
+ :param encoding: Name of an encoding to use.
282
+ :type encoding: str
283
+ :param errors: Error handling scheme for codec. Same as the ``encode()``
284
+ builtin string method.
285
+ :type errors: str
286
+ :param unicode_fields:
287
+ :type unicode_fields: dict(str) or set(str)
288
+ :rtype: str
289
+ """
290
+ if tree.tag == "record":
291
+ root = Element("toolbox_data")
292
+ root.append(tree)
293
+ tree = root
294
+
295
+ if tree.tag != "toolbox_data":
296
+ raise ValueError("not a toolbox_data element structure")
297
+ if encoding is None and unicode_fields is not None:
298
+ raise ValueError(
299
+ "if encoding is not specified then neither should unicode_fields"
300
+ )
301
+ l = []
302
+ for rec in tree:
303
+ l.append("\n")
304
+ for field in rec:
305
+ mkr = field.tag
306
+ value = field.text
307
+ if encoding is not None:
308
+ if unicode_fields is not None and mkr in unicode_fields:
309
+ cur_encoding = "utf8"
310
+ else:
311
+ cur_encoding = encoding
312
+ if re.search(_is_value, value):
313
+ l.append((f"\\{mkr} {value}\n").encode(cur_encoding, errors))
314
+ else:
315
+ l.append((f"\\{mkr}{value}\n").encode(cur_encoding, errors))
316
+ else:
317
+ if re.search(_is_value, value):
318
+ l.append(f"\\{mkr} {value}\n")
319
+ else:
320
+ l.append(f"\\{mkr}{value}\n")
321
+ return "".join(l[1:])
322
+
323
+
324
+ class ToolboxSettings(StandardFormat):
325
+ """This class is the base class for settings files."""
326
+
327
+ def __init__(self):
328
+ super().__init__()
329
+
330
+ def parse(self, encoding=None, errors="strict", **kwargs):
331
+ """
332
+ Return the contents of toolbox settings file with a nested structure.
333
+
334
+ :param encoding: encoding used by settings file
335
+ :type encoding: str
336
+ :param errors: Error handling scheme for codec. Same as ``decode()`` builtin method.
337
+ :type errors: str
338
+ :param kwargs: Keyword arguments passed to ``StandardFormat.fields()``
339
+ :type kwargs: dict
340
+ :rtype: ElementTree._ElementInterface
341
+ """
342
+ builder = TreeBuilder()
343
+ for mkr, value in self.fields(encoding=encoding, errors=errors, **kwargs):
344
+ # Check whether the first char of the field marker
345
+ # indicates a block start (+) or end (-)
346
+ block = mkr[0]
347
+ if block in ("+", "-"):
348
+ mkr = mkr[1:]
349
+ else:
350
+ block = None
351
+ # Build tree on the basis of block char
352
+ if block == "+":
353
+ builder.start(mkr, {})
354
+ builder.data(value)
355
+ elif block == "-":
356
+ builder.end(mkr)
357
+ else:
358
+ builder.start(mkr, {})
359
+ builder.data(value)
360
+ builder.end(mkr)
361
+ return builder.close()
362
+
363
+
364
+ def to_settings_string(tree, encoding=None, errors="strict", unicode_fields=None):
365
+ # write XML to file
366
+ l = list()
367
+ _to_settings_string(
368
+ tree.getroot(),
369
+ l,
370
+ encoding=encoding,
371
+ errors=errors,
372
+ unicode_fields=unicode_fields,
373
+ )
374
+ return "".join(l)
375
+
376
+
377
+ def _to_settings_string(node, l, **kwargs):
378
+ # write XML to file
379
+ tag = node.tag
380
+ text = node.text
381
+ if len(node) == 0:
382
+ if text:
383
+ l.append(f"\\{tag} {text}\n")
384
+ else:
385
+ l.append("\\%s\n" % tag)
386
+ else:
387
+ if text:
388
+ l.append(f"\\+{tag} {text}\n")
389
+ else:
390
+ l.append("\\+%s\n" % tag)
391
+ for n in node:
392
+ _to_settings_string(n, l, **kwargs)
393
+ l.append("\\-%s\n" % tag)
394
+ return
395
+
396
+
397
+ def remove_blanks(elem):
398
+ """
399
+ Remove all elements and subelements with no text and no child elements.
400
+
401
+ :param elem: toolbox data in an elementtree structure
402
+ :type elem: ElementTree._ElementInterface
403
+ """
404
+ out = list()
405
+ for child in elem:
406
+ remove_blanks(child)
407
+ if child.text or len(child) > 0:
408
+ out.append(child)
409
+ elem[:] = out
410
+
411
+
412
+ def add_default_fields(elem, default_fields):
413
+ """
414
+ Add blank elements and subelements specified in default_fields.
415
+
416
+ :param elem: toolbox data in an elementtree structure
417
+ :type elem: ElementTree._ElementInterface
418
+ :param default_fields: fields to add to each type of element and subelement
419
+ :type default_fields: dict(tuple)
420
+ """
421
+ for field in default_fields.get(elem.tag, []):
422
+ if elem.find(field) is None:
423
+ SubElement(elem, field)
424
+ for child in elem:
425
+ add_default_fields(child, default_fields)
426
+
427
+
428
+ def sort_fields(elem, field_orders):
429
+ """
430
+ Sort the elements and subelements in order specified in field_orders.
431
+
432
+ :param elem: toolbox data in an elementtree structure
433
+ :type elem: ElementTree._ElementInterface
434
+ :param field_orders: order of fields for each type of element and subelement
435
+ :type field_orders: dict(tuple)
436
+ """
437
+ order_dicts = dict()
438
+ for field, order in field_orders.items():
439
+ order_dicts[field] = order_key = dict()
440
+ for i, subfield in enumerate(order):
441
+ order_key[subfield] = i
442
+ _sort_fields(elem, order_dicts)
443
+
444
+
445
+ def _sort_fields(elem, orders_dicts):
446
+ """sort the children of elem"""
447
+ try:
448
+ order = orders_dicts[elem.tag]
449
+ except KeyError:
450
+ pass
451
+ else:
452
+ tmp = sorted(
453
+ ((order.get(child.tag, 1e9), i), child) for i, child in enumerate(elem)
454
+ )
455
+ elem[:] = [child for key, child in tmp]
456
+ for child in elem:
457
+ if len(child):
458
+ _sort_fields(child, orders_dicts)
459
+
460
+
461
+ def add_blank_lines(tree, blanks_before, blanks_between):
462
+ """
463
+ Add blank lines before all elements and subelements specified in blanks_before.
464
+
465
+ :param tree: toolbox data in an elementtree structure
466
+ :type tree: ElementTree._ElementInterface
467
+ :param blanks_before: elements and subelements to add blank lines before
468
+ :type blanks_before: dict(tuple)
469
+ """
470
+ try:
471
+ before = blanks_before[tree.tag]
472
+ between = blanks_between[tree.tag]
473
+ except KeyError:
474
+ for elem in tree:
475
+ if len(elem):
476
+ add_blank_lines(elem, blanks_before, blanks_between)
477
+ else:
478
+ last_elem = None
479
+ for elem in tree:
480
+ tag = elem.tag
481
+ if last_elem is not None and last_elem.tag != tag:
482
+ if tag in before and last_elem is not None:
483
+ e = last_elem.getiterator()[-1]
484
+ e.text = (e.text or "") + "\n"
485
+ else:
486
+ if tag in between:
487
+ e = last_elem.getiterator()[-1]
488
+ e.text = (e.text or "") + "\n"
489
+ if len(elem):
490
+ add_blank_lines(elem, blanks_before, blanks_between)
491
+ last_elem = elem
492
+
493
+
494
+ def demo():
495
+ from itertools import islice
496
+
497
+ # zip_path = find('corpora/toolbox.zip')
498
+ # lexicon = ToolboxData(ZipFilePathPointer(zip_path, 'toolbox/rotokas.dic')).parse()
499
+ file_path = find("corpora/toolbox/rotokas.dic")
500
+ lexicon = ToolboxData(file_path).parse()
501
+ print("first field in fourth record:")
502
+ print(lexicon[3][0].tag)
503
+ print(lexicon[3][0].text)
504
+
505
+ print("\nfields in sequential order:")
506
+ for field in islice(lexicon.find("record"), 10):
507
+ print(field.tag, field.text)
508
+
509
+ print("\nlx fields:")
510
+ for field in islice(lexicon.findall("record/lx"), 10):
511
+ print(field.text)
512
+
513
+ settings = ToolboxSettings()
514
+ file_path = find("corpora/toolbox/MDF/MDF_AltH.typ")
515
+ settings.open(file_path)
516
+ # settings.open(ZipFilePathPointer(zip_path, entry='toolbox/MDF/MDF_AltH.typ'))
517
+ tree = settings.parse(unwrap=False, encoding="cp1252")
518
+ print(tree.find("expset/expMDF/rtfPageSetup/paperSize").text)
519
+ settings_tree = ElementTree(tree)
520
+ print(to_settings_string(settings_tree).encode("utf8"))
521
+
522
+
523
+ if __name__ == "__main__":
524
+ demo()