applied-ai-018 committed on
Commit 4012a1a · verified · 1 Parent(s): 32f4873

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/nltk/VERSION +1 -0
  2. env-llmeval/lib/python3.10/site-packages/nltk/__init__.py +209 -0
  3. env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/__init__.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/chartparser_app.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/collocations_app.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/concordance_app.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/nemo_app.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/rdparser_app.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/srparser_app.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/wordfreq_app.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/wordnet_app.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/nltk/book.py +213 -0
  13. env-llmeval/lib/python3.10/site-packages/nltk/chat/__init__.py +48 -0
  14. env-llmeval/lib/python3.10/site-packages/nltk/chat/__pycache__/__init__.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/nltk/chat/__pycache__/eliza.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/nltk/chat/__pycache__/iesha.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/nltk/chat/__pycache__/rude.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/nltk/chat/__pycache__/suntsu.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/nltk/chat/__pycache__/util.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/nltk/chat/__pycache__/zen.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/nltk/chat/eliza.py +337 -0
  22. env-llmeval/lib/python3.10/site-packages/nltk/chat/iesha.py +160 -0
  23. env-llmeval/lib/python3.10/site-packages/nltk/chat/rude.py +125 -0
  24. env-llmeval/lib/python3.10/site-packages/nltk/chat/suntsu.py +140 -0
  25. env-llmeval/lib/python3.10/site-packages/nltk/chat/util.py +124 -0
  26. env-llmeval/lib/python3.10/site-packages/nltk/chat/zen.py +329 -0
  27. env-llmeval/lib/python3.10/site-packages/nltk/cli.py +55 -0
  28. env-llmeval/lib/python3.10/site-packages/nltk/collections.py +661 -0
  29. env-llmeval/lib/python3.10/site-packages/nltk/collocations.py +412 -0
  30. env-llmeval/lib/python3.10/site-packages/nltk/compat.py +43 -0
  31. env-llmeval/lib/python3.10/site-packages/nltk/data.py +1441 -0
  32. env-llmeval/lib/python3.10/site-packages/nltk/decorators.py +251 -0
  33. env-llmeval/lib/python3.10/site-packages/nltk/downloader.py +2559 -0
  34. env-llmeval/lib/python3.10/site-packages/nltk/featstruct.py +0 -0
  35. env-llmeval/lib/python3.10/site-packages/nltk/grammar.py +1708 -0
  36. env-llmeval/lib/python3.10/site-packages/nltk/help.py +64 -0
  37. env-llmeval/lib/python3.10/site-packages/nltk/internals.py +1123 -0
  38. env-llmeval/lib/python3.10/site-packages/nltk/jsontags.py +65 -0
  39. env-llmeval/lib/python3.10/site-packages/nltk/langnames.py +730 -0
  40. env-llmeval/lib/python3.10/site-packages/nltk/lazyimport.py +142 -0
  41. env-llmeval/lib/python3.10/site-packages/nltk/lm/__init__.py +235 -0
  42. env-llmeval/lib/python3.10/site-packages/nltk/lm/api.py +235 -0
  43. env-llmeval/lib/python3.10/site-packages/nltk/lm/counter.py +163 -0
  44. env-llmeval/lib/python3.10/site-packages/nltk/lm/models.py +141 -0
  45. env-llmeval/lib/python3.10/site-packages/nltk/lm/preprocessing.py +51 -0
  46. env-llmeval/lib/python3.10/site-packages/nltk/lm/util.py +19 -0
  47. env-llmeval/lib/python3.10/site-packages/nltk/lm/vocabulary.py +218 -0
  48. env-llmeval/lib/python3.10/site-packages/nltk/probability.py +2578 -0
  49. env-llmeval/lib/python3.10/site-packages/nltk/sem/__init__.py +75 -0
  50. env-llmeval/lib/python3.10/site-packages/nltk/sem/__pycache__/__init__.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/nltk/VERSION ADDED
@@ -0,0 +1 @@
+3.8.1
env-llmeval/lib/python3.10/site-packages/nltk/__init__.py ADDED
@@ -0,0 +1,209 @@
+# Natural Language Toolkit (NLTK)
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Authors: Steven Bird <[email protected]>
+#          Edward Loper <[email protected]>
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+"""
+The Natural Language Toolkit (NLTK) is an open source Python library
+for Natural Language Processing. A free online book is available.
+(If you use the library for academic research, please cite the book.)
+
+Steven Bird, Ewan Klein, and Edward Loper (2009).
+Natural Language Processing with Python. O'Reilly Media Inc.
+https://www.nltk.org/book/
+
+isort:skip_file
+"""
+
+import os
+
+# //////////////////////////////////////////////////////
+# Metadata
+# //////////////////////////////////////////////////////
+
+# Version. For each new release, the version number should be updated
+# in the file VERSION.
+try:
+    # If a VERSION file exists, use it!
+    version_file = os.path.join(os.path.dirname(__file__), "VERSION")
+    with open(version_file) as infile:
+        __version__ = infile.read().strip()
+except NameError:
+    __version__ = "unknown (running code interactively?)"
+except OSError as ex:
+    __version__ = "unknown (%s)" % ex
+
+if __doc__ is not None:  # fix for the ``python -OO``
+    __doc__ += "\n@version: " + __version__
+
+
+# Copyright notice
+__copyright__ = """\
+Copyright (C) 2001-2023 NLTK Project.
+
+Distributed and Licensed under the Apache License, Version 2.0,
+which is included by reference.
+"""
+
+__license__ = "Apache License, Version 2.0"
+# Description of the toolkit, keywords, and the project's primary URL.
+__longdescr__ = """\
+The Natural Language Toolkit (NLTK) is a Python package for
+natural language processing. NLTK requires Python 3.7, 3.8, 3.9, 3.10 or 3.11."""
+__keywords__ = [
+    "NLP",
+    "CL",
+    "natural language processing",
+    "computational linguistics",
+    "parsing",
+    "tagging",
+    "tokenizing",
+    "syntax",
+    "linguistics",
+    "language",
+    "natural language",
+    "text analytics",
+]
+__url__ = "https://www.nltk.org/"
+
+# Maintainer, contributors, etc.
+__maintainer__ = "NLTK Team"
+__maintainer_email__ = "[email protected]"
+__author__ = __maintainer__
+__author_email__ = __maintainer_email__
+
+# "Trove" classifiers for Python Package Index.
+__classifiers__ = [
+    "Development Status :: 5 - Production/Stable",
+    "Intended Audience :: Developers",
+    "Intended Audience :: Education",
+    "Intended Audience :: Information Technology",
+    "Intended Audience :: Science/Research",
+    "License :: OSI Approved :: Apache Software License",
+    "Operating System :: OS Independent",
+    "Programming Language :: Python :: 3.7",
+    "Programming Language :: Python :: 3.8",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Topic :: Scientific/Engineering",
+    "Topic :: Scientific/Engineering :: Artificial Intelligence",
+    "Topic :: Scientific/Engineering :: Human Machine Interfaces",
+    "Topic :: Scientific/Engineering :: Information Analysis",
+    "Topic :: Text Processing",
+    "Topic :: Text Processing :: Filters",
+    "Topic :: Text Processing :: General",
+    "Topic :: Text Processing :: Indexing",
+    "Topic :: Text Processing :: Linguistic",
+]
+
+from nltk.internals import config_java
+
+# support numpy from pypy
+try:
+    import numpypy
+except ImportError:
+    pass
+
+# Override missing methods on environments where it cannot be used like GAE.
+import subprocess
+
+if not hasattr(subprocess, "PIPE"):
+
+    def _fake_PIPE(*args, **kwargs):
+        raise NotImplementedError("subprocess.PIPE is not supported.")
+
+    subprocess.PIPE = _fake_PIPE
+if not hasattr(subprocess, "Popen"):
+
+    def _fake_Popen(*args, **kwargs):
+        raise NotImplementedError("subprocess.Popen is not supported.")
+
+    subprocess.Popen = _fake_Popen
+
+###########################################################
+# TOP-LEVEL MODULES
+###########################################################
+
+# Import top-level functionality into top-level namespace
+
+from nltk.collocations import *
+from nltk.decorators import decorator, memoize
+from nltk.featstruct import *
+from nltk.grammar import *
+from nltk.probability import *
+from nltk.text import *
+from nltk.util import *
+from nltk.jsontags import *
+
+###########################################################
+# PACKAGES
+###########################################################
+
+from nltk.chunk import *
+from nltk.classify import *
+from nltk.inference import *
+from nltk.metrics import *
+from nltk.parse import *
+from nltk.tag import *
+from nltk.tokenize import *
+from nltk.translate import *
+from nltk.tree import *
+from nltk.sem import *
+from nltk.stem import *
+
+# Packages which can be lazily imported
+# (a) we don't import *
+# (b) they're slow to import or have run-time dependencies
+#     that can safely fail at run time
+
+from nltk import lazyimport
+
+app = lazyimport.LazyModule("app", locals(), globals())
+chat = lazyimport.LazyModule("chat", locals(), globals())
+corpus = lazyimport.LazyModule("corpus", locals(), globals())
+draw = lazyimport.LazyModule("draw", locals(), globals())
+toolbox = lazyimport.LazyModule("toolbox", locals(), globals())
+
+# Optional loading
+
+try:
+    import numpy
+except ImportError:
+    pass
+else:
+    from nltk import cluster
+
+from nltk.downloader import download, download_shell
+
+try:
+    import tkinter
+except ImportError:
+    pass
+else:
+    try:
+        from nltk.downloader import download_gui
+    except RuntimeError as e:
+        import warnings
+
+        warnings.warn(
+            "Corpus downloader GUI not loaded "
+            "(RuntimeError during import: %s)" % str(e)
+        )
+
+# explicitly import all top-level modules (ensuring
+# they override the same names inadvertently imported
+# from a subpackage)
+
+from nltk import ccg, chunk, classify, collocations
+from nltk import data, featstruct, grammar, help, inference, metrics
+from nltk import misc, parse, probability, sem, stem, wsd
+from nltk import tag, tbl, text, tokenize, translate, tree, util
+
+
+# FIXME: override any accidentally imported demo, see https://github.com/nltk/nltk/issues/2116
+def demo():
+    print("To run the demo code for a module, type nltk.module.demo()")
env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.33 kB)

env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/chartparser_app.cpython-310.pyc ADDED
Binary file (62.5 kB)

env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/collocations_app.cpython-310.pyc ADDED
Binary file (14.8 kB)

env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/concordance_app.cpython-310.pyc ADDED
Binary file (22.6 kB)

env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/nemo_app.cpython-310.pyc ADDED
Binary file (12.2 kB)

env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/rdparser_app.cpython-310.pyc ADDED
Binary file (25.9 kB)

env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/srparser_app.cpython-310.pyc ADDED
Binary file (22 kB)

env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/wordfreq_app.cpython-310.pyc ADDED
Binary file (1.47 kB)

env-llmeval/lib/python3.10/site-packages/nltk/app/__pycache__/wordnet_app.cpython-310.pyc ADDED
Binary file (31 kB)
env-llmeval/lib/python3.10/site-packages/nltk/book.py ADDED
@@ -0,0 +1,213 @@
+# Natural Language Toolkit: Some texts for exploration in chapter 1 of the book
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Steven Bird <[email protected]>
+#
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+from nltk.corpus import (
+    genesis,
+    gutenberg,
+    inaugural,
+    nps_chat,
+    treebank,
+    webtext,
+    wordnet,
+)
+from nltk.probability import FreqDist
+from nltk.text import Text
+from nltk.util import bigrams
+
+print("*** Introductory Examples for the NLTK Book ***")
+print("Loading text1, ..., text9 and sent1, ..., sent9")
+print("Type the name of the text or sentence to view it.")
+print("Type: 'texts()' or 'sents()' to list the materials.")
+
+text1 = Text(gutenberg.words("melville-moby_dick.txt"))
+print("text1:", text1.name)
+
+text2 = Text(gutenberg.words("austen-sense.txt"))
+print("text2:", text2.name)
+
+text3 = Text(genesis.words("english-kjv.txt"), name="The Book of Genesis")
+print("text3:", text3.name)
+
+text4 = Text(inaugural.words(), name="Inaugural Address Corpus")
+print("text4:", text4.name)
+
+text5 = Text(nps_chat.words(), name="Chat Corpus")
+print("text5:", text5.name)
+
+text6 = Text(webtext.words("grail.txt"), name="Monty Python and the Holy Grail")
+print("text6:", text6.name)
+
+text7 = Text(treebank.words(), name="Wall Street Journal")
+print("text7:", text7.name)
+
+text8 = Text(webtext.words("singles.txt"), name="Personals Corpus")
+print("text8:", text8.name)
+
+text9 = Text(gutenberg.words("chesterton-thursday.txt"))
+print("text9:", text9.name)
+
+
+def texts():
+    print("text1:", text1.name)
+    print("text2:", text2.name)
+    print("text3:", text3.name)
+    print("text4:", text4.name)
+    print("text5:", text5.name)
+    print("text6:", text6.name)
+    print("text7:", text7.name)
+    print("text8:", text8.name)
+    print("text9:", text9.name)
+
+
+sent1 = ["Call", "me", "Ishmael", "."]
+sent2 = [
+    "The",
+    "family",
+    "of",
+    "Dashwood",
+    "had",
+    "long",
+    "been",
+    "settled",
+    "in",
+    "Sussex",
+    ".",
+]
+sent3 = [
+    "In",
+    "the",
+    "beginning",
+    "God",
+    "created",
+    "the",
+    "heaven",
+    "and",
+    "the",
+    "earth",
+    ".",
+]
+sent4 = [
+    "Fellow",
+    "-",
+    "Citizens",
+    "of",
+    "the",
+    "Senate",
+    "and",
+    "of",
+    "the",
+    "House",
+    "of",
+    "Representatives",
+    ":",
+]
+sent5 = [
+    "I",
+    "have",
+    "a",
+    "problem",
+    "with",
+    "people",
+    "PMing",
+    "me",
+    "to",
+    "lol",
+    "JOIN",
+]
+sent6 = [
+    "SCENE",
+    "1",
+    ":",
+    "[",
+    "wind",
+    "]",
+    "[",
+    "clop",
+    "clop",
+    "clop",
+    "]",
+    "KING",
+    "ARTHUR",
+    ":",
+    "Whoa",
+    "there",
+    "!",
+]
+sent7 = [
+    "Pierre",
+    "Vinken",
+    ",",
+    "61",
+    "years",
+    "old",
+    ",",
+    "will",
+    "join",
+    "the",
+    "board",
+    "as",
+    "a",
+    "nonexecutive",
+    "director",
+    "Nov.",
+    "29",
+    ".",
+]
+sent8 = [
+    "25",
+    "SEXY",
+    "MALE",
+    ",",
+    "seeks",
+    "attrac",
+    "older",
+    "single",
+    "lady",
+    ",",
+    "for",
+    "discreet",
+    "encounters",
+    ".",
+]
+sent9 = [
+    "THE",
+    "suburb",
+    "of",
+    "Saffron",
+    "Park",
+    "lay",
+    "on",
+    "the",
+    "sunset",
+    "side",
+    "of",
+    "London",
+    ",",
+    "as",
+    "red",
+    "and",
+    "ragged",
+    "as",
+    "a",
+    "cloud",
+    "of",
+    "sunset",
+    ".",
+]
+
+
+def sents():
+    print("sent1:", " ".join(sent1))
+    print("sent2:", " ".join(sent2))
+    print("sent3:", " ".join(sent3))
+    print("sent4:", " ".join(sent4))
+    print("sent5:", " ".join(sent5))
+    print("sent6:", " ".join(sent6))
+    print("sent7:", " ".join(sent7))
+    print("sent8:", " ".join(sent8))
+    print("sent9:", " ".join(sent9))
env-llmeval/lib/python3.10/site-packages/nltk/chat/__init__.py ADDED
@@ -0,0 +1,48 @@
+# Natural Language Toolkit: Chatbots
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Authors: Steven Bird <[email protected]>
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+# Based on an Eliza implementation by Joe Strout <[email protected]>,
+# Jeff Epler <[email protected]> and Jez Higgins <[email protected]>.
+
+"""
+A class for simple chatbots. These perform simple pattern matching on sentences
+typed by users, and respond with automatically generated sentences.
+
+These chatbots may not work using the windows command line or the
+windows IDLE GUI.
+"""
+
+from nltk.chat.eliza import eliza_chat
+from nltk.chat.iesha import iesha_chat
+from nltk.chat.rude import rude_chat
+from nltk.chat.suntsu import suntsu_chat
+from nltk.chat.util import Chat
+from nltk.chat.zen import zen_chat
+
+bots = [
+    (eliza_chat, "Eliza (psycho-babble)"),
+    (iesha_chat, "Iesha (teen anime junky)"),
+    (rude_chat, "Rude (abusive bot)"),
+    (suntsu_chat, "Suntsu (Chinese sayings)"),
+    (zen_chat, "Zen (gems of wisdom)"),
+]
+
+
+def chatbots():
+    print("Which chatbot would you like to talk to?")
+    botcount = len(bots)
+    for i in range(botcount):
+        print("  %d: %s" % (i + 1, bots[i][1]))
+    while True:
+        choice = input(f"\nEnter a number in the range 1-{botcount}: ").strip()
+        if choice.isdigit() and (int(choice) - 1) in range(botcount):
+            break
+        else:
+            print("   Error: bad chatbot number")
+
+    chatbot = bots[int(choice) - 1][0]
+    chatbot()
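Note: chatbots() is an interactive picker over the bots registry, where each entry pairs a launcher function with its menu label. The launchers are also importable directly, which skips the menu; a sketch:

    import nltk.chat

    # Interactive menu over the five bots listed above:
    # nltk.chat.chatbots()

    # Or start one bot directly; each *_chat() prints a banner and then
    # enters a converse() loop that ends when the user types "quit".
    nltk.chat.zen_chat()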
env-llmeval/lib/python3.10/site-packages/nltk/chat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.36 kB)

env-llmeval/lib/python3.10/site-packages/nltk/chat/__pycache__/eliza.cpython-310.pyc ADDED
Binary file (5.88 kB)

env-llmeval/lib/python3.10/site-packages/nltk/chat/__pycache__/iesha.cpython-310.pyc ADDED
Binary file (3.31 kB)

env-llmeval/lib/python3.10/site-packages/nltk/chat/__pycache__/rude.cpython-310.pyc ADDED
Binary file (2.21 kB)

env-llmeval/lib/python3.10/site-packages/nltk/chat/__pycache__/suntsu.cpython-310.pyc ADDED
Binary file (5.96 kB)

env-llmeval/lib/python3.10/site-packages/nltk/chat/__pycache__/util.cpython-310.pyc ADDED
Binary file (3.75 kB)

env-llmeval/lib/python3.10/site-packages/nltk/chat/__pycache__/zen.cpython-310.pyc ADDED
Binary file (6.51 kB)
env-llmeval/lib/python3.10/site-packages/nltk/chat/eliza.py ADDED
@@ -0,0 +1,337 @@
+# Natural Language Toolkit: Eliza
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Authors: Steven Bird <[email protected]>
+#          Edward Loper <[email protected]>
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+# Based on an Eliza implementation by Joe Strout <[email protected]>,
+# Jeff Epler <[email protected]> and Jez Higgins <mailto:[email protected]>.
+
+# a translation table used to convert things you say into things the
+# computer says back, e.g. "I am" --> "you are"
+
+from nltk.chat.util import Chat, reflections
+
+# a table of response pairs, where each pair consists of a
+# regular expression, and a list of possible responses,
+# with group-macros labelled as %1, %2.
+
+pairs = (
+    (
+        r"I need (.*)",
+        (
+            "Why do you need %1?",
+            "Would it really help you to get %1?",
+            "Are you sure you need %1?",
+        ),
+    ),
+    (
+        r"Why don\'t you (.*)",
+        (
+            "Do you really think I don't %1?",
+            "Perhaps eventually I will %1.",
+            "Do you really want me to %1?",
+        ),
+    ),
+    (
+        r"Why can\'t I (.*)",
+        (
+            "Do you think you should be able to %1?",
+            "If you could %1, what would you do?",
+            "I don't know -- why can't you %1?",
+            "Have you really tried?",
+        ),
+    ),
+    (
+        r"I can\'t (.*)",
+        (
+            "How do you know you can't %1?",
+            "Perhaps you could %1 if you tried.",
+            "What would it take for you to %1?",
+        ),
+    ),
+    (
+        r"I am (.*)",
+        (
+            "Did you come to me because you are %1?",
+            "How long have you been %1?",
+            "How do you feel about being %1?",
+        ),
+    ),
+    (
+        r"I\'m (.*)",
+        (
+            "How does being %1 make you feel?",
+            "Do you enjoy being %1?",
+            "Why do you tell me you're %1?",
+            "Why do you think you're %1?",
+        ),
+    ),
+    (
+        r"Are you (.*)",
+        (
+            "Why does it matter whether I am %1?",
+            "Would you prefer it if I were not %1?",
+            "Perhaps you believe I am %1.",
+            "I may be %1 -- what do you think?",
+        ),
+    ),
+    (
+        r"What (.*)",
+        (
+            "Why do you ask?",
+            "How would an answer to that help you?",
+            "What do you think?",
+        ),
+    ),
+    (
+        r"How (.*)",
+        (
+            "How do you suppose?",
+            "Perhaps you can answer your own question.",
+            "What is it you're really asking?",
+        ),
+    ),
+    (
+        r"Because (.*)",
+        (
+            "Is that the real reason?",
+            "What other reasons come to mind?",
+            "Does that reason apply to anything else?",
+            "If %1, what else must be true?",
+        ),
+    ),
+    (
+        r"(.*) sorry (.*)",
+        (
+            "There are many times when no apology is needed.",
+            "What feelings do you have when you apologize?",
+        ),
+    ),
+    (
+        r"Hello(.*)",
+        (
+            "Hello... I'm glad you could drop by today.",
+            "Hi there... how are you today?",
+            "Hello, how are you feeling today?",
+        ),
+    ),
+    (
+        r"I think (.*)",
+        ("Do you doubt %1?", "Do you really think so?", "But you're not sure %1?"),
+    ),
+    (
+        r"(.*) friend (.*)",
+        (
+            "Tell me more about your friends.",
+            "When you think of a friend, what comes to mind?",
+            "Why don't you tell me about a childhood friend?",
+        ),
+    ),
+    (r"Yes", ("You seem quite sure.", "OK, but can you elaborate a bit?")),
+    (
+        r"(.*) computer(.*)",
+        (
+            "Are you really talking about me?",
+            "Does it seem strange to talk to a computer?",
+            "How do computers make you feel?",
+            "Do you feel threatened by computers?",
+        ),
+    ),
+    (
+        r"Is it (.*)",
+        (
+            "Do you think it is %1?",
+            "Perhaps it's %1 -- what do you think?",
+            "If it were %1, what would you do?",
+            "It could well be that %1.",
+        ),
+    ),
+    (
+        r"It is (.*)",
+        (
+            "You seem very certain.",
+            "If I told you that it probably isn't %1, what would you feel?",
+        ),
+    ),
+    (
+        r"Can you (.*)",
+        (
+            "What makes you think I can't %1?",
+            "If I could %1, then what?",
+            "Why do you ask if I can %1?",
+        ),
+    ),
+    (
+        r"Can I (.*)",
+        (
+            "Perhaps you don't want to %1.",
+            "Do you want to be able to %1?",
+            "If you could %1, would you?",
+        ),
+    ),
+    (
+        r"You are (.*)",
+        (
+            "Why do you think I am %1?",
+            "Does it please you to think that I'm %1?",
+            "Perhaps you would like me to be %1.",
+            "Perhaps you're really talking about yourself?",
+        ),
+    ),
+    (
+        r"You\'re (.*)",
+        (
+            "Why do you say I am %1?",
+            "Why do you think I am %1?",
+            "Are we talking about you, or me?",
+        ),
+    ),
+    (
+        r"I don\'t (.*)",
+        ("Don't you really %1?", "Why don't you %1?", "Do you want to %1?"),
+    ),
+    (
+        r"I feel (.*)",
+        (
+            "Good, tell me more about these feelings.",
+            "Do you often feel %1?",
+            "When do you usually feel %1?",
+            "When you feel %1, what do you do?",
+        ),
+    ),
+    (
+        r"I have (.*)",
+        (
+            "Why do you tell me that you've %1?",
+            "Have you really %1?",
+            "Now that you have %1, what will you do next?",
+        ),
+    ),
+    (
+        r"I would (.*)",
+        (
+            "Could you explain why you would %1?",
+            "Why would you %1?",
+            "Who else knows that you would %1?",
+        ),
+    ),
+    (
+        r"Is there (.*)",
+        (
+            "Do you think there is %1?",
+            "It's likely that there is %1.",
+            "Would you like there to be %1?",
+        ),
+    ),
+    (
+        r"My (.*)",
+        (
+            "I see, your %1.",
+            "Why do you say that your %1?",
+            "When your %1, how do you feel?",
+        ),
+    ),
+    (
+        r"You (.*)",
+        (
+            "We should be discussing you, not me.",
+            "Why do you say that about me?",
+            "Why do you care whether I %1?",
+        ),
+    ),
+    (r"Why (.*)", ("Why don't you tell me the reason why %1?", "Why do you think %1?")),
+    (
+        r"I want (.*)",
+        (
+            "What would it mean to you if you got %1?",
+            "Why do you want %1?",
+            "What would you do if you got %1?",
+            "If you got %1, then what would you do?",
+        ),
+    ),
+    (
+        r"(.*) mother(.*)",
+        (
+            "Tell me more about your mother.",
+            "What was your relationship with your mother like?",
+            "How do you feel about your mother?",
+            "How does this relate to your feelings today?",
+            "Good family relations are important.",
+        ),
+    ),
+    (
+        r"(.*) father(.*)",
+        (
+            "Tell me more about your father.",
+            "How did your father make you feel?",
+            "How do you feel about your father?",
+            "Does your relationship with your father relate to your feelings today?",
+            "Do you have trouble showing affection with your family?",
+        ),
+    ),
+    (
+        r"(.*) child(.*)",
+        (
+            "Did you have close friends as a child?",
+            "What is your favorite childhood memory?",
+            "Do you remember any dreams or nightmares from childhood?",
+            "Did the other children sometimes tease you?",
+            "How do you think your childhood experiences relate to your feelings today?",
+        ),
+    ),
+    (
+        r"(.*)\?",
+        (
+            "Why do you ask that?",
+            "Please consider whether you can answer your own question.",
+            "Perhaps the answer lies within yourself?",
+            "Why don't you tell me?",
+        ),
+    ),
+    (
+        r"quit",
+        (
+            "Thank you for talking with me.",
+            "Good-bye.",
+            "Thank you, that will be $150. Have a good day!",
+        ),
+    ),
+    (
+        r"(.*)",
+        (
+            "Please tell me more.",
+            "Let's change focus a bit... Tell me about your family.",
+            "Can you elaborate on that?",
+            "Why do you say that %1?",
+            "I see.",
+            "Very interesting.",
+            "%1.",
+            "I see. And what does that tell you?",
+            "How does that make you feel?",
+            "How do you feel when you say that?",
+        ),
+    ),
+)
+
+eliza_chatbot = Chat(pairs, reflections)
+
+
+def eliza_chat():
+    print("Therapist\n---------")
+    print("Talk to the program by typing in plain English, using normal upper-")
+    print('and lower-case letters and punctuation. Enter "quit" when done.')
+    print("=" * 72)
+    print("Hello. How are you feeling today?")
+
+    eliza_chatbot.converse()
+
+
+def demo():
+    eliza_chat()
+
+
+if __name__ == "__main__":
+    demo()
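Note: each pair above maps a regular expression to a tuple of candidate responses, with %1, %2, ... filled in from the corresponding capture groups after pronoun-flipping through the reflections table (see nltk/chat/util.py below). The table can be exercised without the interactive loop by calling respond() directly; a sketch:

    from nltk.chat.eliza import eliza_chatbot

    # respond() returns a randomly chosen response for the first matching
    # pattern, so the exact output varies from run to run.
    print(eliza_chatbot.respond("I need a holiday"))
    # e.g. "Why do you need a holiday?"  ("I need (.*)" matched, %1 -> "a holiday")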
env-llmeval/lib/python3.10/site-packages/nltk/chat/iesha.py ADDED
@@ -0,0 +1,160 @@
+# Natural Language Toolkit: Teen Chatbot
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Selina Dennis <[email protected]>
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+"""
+This chatbot is a tongue-in-cheek take on the average teen
+anime junky that frequents YahooMessenger or MSNM.
+All spelling mistakes and flawed grammar are intentional.
+"""
+
+from nltk.chat.util import Chat
+
+reflections = {
+    "am": "r",
+    "was": "were",
+    "i": "u",
+    "i'd": "u'd",
+    "i've": "u'v",
+    "ive": "u'v",
+    "i'll": "u'll",
+    "my": "ur",
+    "are": "am",
+    "you're": "im",
+    "you've": "ive",
+    "you'll": "i'll",
+    "your": "my",
+    "yours": "mine",
+    "you": "me",
+    "u": "me",
+    "ur": "my",
+    "urs": "mine",
+    "me": "u",
+}
+
+# Note: %1/2/etc are used without spaces prior as the chat bot seems
+# to add a superfluous space when matching.
+
+pairs = (
+    (
+        r"I\'m (.*)",
+        (
+            "ur%1?? that's so cool! kekekekeke ^_^ tell me more!",
+            "ur%1? neat!! kekeke >_<",
+        ),
+    ),
+    (
+        r"(.*) don\'t you (.*)",
+        (
+            r"u think I can%2??! really?? kekeke \<_\<",
+            "what do u mean%2??!",
+            "i could if i wanted, don't you think!! kekeke",
+        ),
+    ),
+    (r"ye[as] [iI] (.*)", ("u%1? cool!! how?", "how come u%1??", "u%1? so do i!!")),
+    (
+        r"do (you|u) (.*)\??",
+        ("do i%2? only on tuesdays! kekeke *_*", "i dunno! do u%2??"),
+    ),
+    (
+        r"(.*)\?",
+        (
+            "man u ask lots of questions!",
+            "booooring! how old r u??",
+            "boooooring!! ur not very fun",
+        ),
+    ),
+    (
+        r"(cos|because) (.*)",
+        ("hee! i don't believe u! >_<", "nuh-uh! >_<", "ooooh i agree!"),
+    ),
+    (
+        r"why can\'t [iI] (.*)",
+        (
+            "i dunno! y u askin me for!",
+            "try harder, silly! hee! ^_^",
+            "i dunno! but when i can't%1 i jump up and down!",
+        ),
+    ),
+    (
+        r"I can\'t (.*)",
+        (
+            "u can't what??! >_<",
+            "that's ok! i can't%1 either! kekekekeke ^_^",
+            "try harder, silly! hee! ^&^",
+        ),
+    ),
+    (
+        r"(.*) (like|love|watch) anime",
+        (
+            "omg i love anime!! do u like sailor moon??! ^&^",
+            "anime yay! anime rocks sooooo much!",
+            "oooh anime! i love anime more than anything!",
+            "anime is the bestest evar! evangelion is the best!",
+            "hee anime is the best! do you have ur fav??",
+        ),
+    ),
+    (
+        r"I (like|love|watch|play) (.*)",
+        ("yay! %2 rocks!", "yay! %2 is neat!", "cool! do u like other stuff?? ^_^"),
+    ),
+    (
+        r"anime sucks|(.*) (hate|detest) anime",
+        (
+            "ur a liar! i'm not gonna talk to u nemore if u h8 anime *;*",
+            "no way! anime is the best ever!",
+            "nuh-uh, anime is the best!",
+        ),
+    ),
+    (
+        r"(are|r) (you|u) (.*)",
+        ("am i%1??! how come u ask that!", "maybe! y shud i tell u?? kekeke >_>"),
+    ),
+    (
+        r"what (.*)",
+        ("hee u think im gonna tell u? .v.", "booooooooring! ask me somethin else!"),
+    ),
+    (r"how (.*)", ("not tellin!! kekekekekeke ^_^",)),
+    (r"(hi|hello|hey) (.*)", ("hi!!! how r u!!",)),
+    (
+        r"quit",
+        (
+            "mom says i have to go eat dinner now :,( bye!!",
+            "awww u have to go?? see u next time!!",
+            "how to see u again soon! ^_^",
+        ),
+    ),
+    (
+        r"(.*)",
+        (
+            "ur funny! kekeke",
+            "boooooring! talk about something else! tell me wat u like!",
+            "do u like anime??",
+            "do u watch anime? i like sailor moon! ^_^",
+            "i wish i was a kitty!! kekekeke ^_^",
+        ),
+    ),
+)
+
+iesha_chatbot = Chat(pairs, reflections)
+
+
+def iesha_chat():
+    print("Iesha the TeenBoT\n---------")
+    print("Talk to the program by typing in plain English, using normal upper-")
+    print('and lower-case letters and punctuation. Enter "quit" when done.')
+    print("=" * 72)
+    print("hi!! i'm iesha! who r u??!")
+
+    iesha_chatbot.converse()
+
+
+def demo():
+    iesha_chat()
+
+
+if __name__ == "__main__":
+    demo()
env-llmeval/lib/python3.10/site-packages/nltk/chat/rude.py ADDED
@@ -0,0 +1,125 @@
+# Natural Language Toolkit: Rude Chatbot
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Peter Spiller <[email protected]>
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+from nltk.chat.util import Chat, reflections
+
+pairs = (
+    (
+        r"We (.*)",
+        (
+            "What do you mean, 'we'?",
+            "Don't include me in that!",
+            "I wouldn't be so sure about that.",
+        ),
+    ),
+    (
+        r"You should (.*)",
+        ("Don't tell me what to do, buddy.", "Really? I should, should I?"),
+    ),
+    (
+        r"You\'re(.*)",
+        (
+            "More like YOU'RE %1!",
+            "Hah! Look who's talking.",
+            "Come over here and tell me I'm %1.",
+        ),
+    ),
+    (
+        r"You are(.*)",
+        (
+            "More like YOU'RE %1!",
+            "Hah! Look who's talking.",
+            "Come over here and tell me I'm %1.",
+        ),
+    ),
+    (
+        r"I can\'t(.*)",
+        (
+            "You do sound like the type who can't %1.",
+            "Hear that splashing sound? That's my heart bleeding for you.",
+            "Tell somebody who might actually care.",
+        ),
+    ),
+    (
+        r"I think (.*)",
+        (
+            "I wouldn't think too hard if I were you.",
+            "You actually think? I'd never have guessed...",
+        ),
+    ),
+    (
+        r"I (.*)",
+        (
+            "I'm getting a bit tired of hearing about you.",
+            "How about we talk about me instead?",
+            "Me, me, me... Frankly, I don't care.",
+        ),
+    ),
+    (
+        r"How (.*)",
+        (
+            "How do you think?",
+            "Take a wild guess.",
+            "I'm not even going to dignify that with an answer.",
+        ),
+    ),
+    (r"What (.*)", ("Do I look like an encyclopedia?", "Figure it out yourself.")),
+    (
+        r"Why (.*)",
+        (
+            "Why not?",
+            "That's so obvious I thought even you'd have already figured it out.",
+        ),
+    ),
+    (
+        r"(.*)shut up(.*)",
+        (
+            "Make me.",
+            "Getting angry at a feeble NLP assignment? Somebody's losing it.",
+            "Say that again, I dare you.",
+        ),
+    ),
+    (
+        r"Shut up(.*)",
+        (
+            "Make me.",
+            "Getting angry at a feeble NLP assignment? Somebody's losing it.",
+            "Say that again, I dare you.",
+        ),
+    ),
+    (
+        r"Hello(.*)",
+        ("Oh good, somebody else to talk to. Joy.", "'Hello'? How original..."),
+    ),
+    (
+        r"(.*)",
+        (
+            "I'm getting bored here. Become more interesting.",
+            "Either become more thrilling or get lost, buddy.",
+            "Change the subject before I die of fatal boredom.",
+        ),
+    ),
+)
+
+rude_chatbot = Chat(pairs, reflections)
+
+
+def rude_chat():
+    print("Talk to the program by typing in plain English, using normal upper-")
+    print('and lower-case letters and punctuation. Enter "quit" when done.')
+    print("=" * 72)
+    print("I suppose I should say hello.")
+
+    rude_chatbot.converse()
+
+
+def demo():
+    rude_chat()
+
+
+if __name__ == "__main__":
+    demo()
env-llmeval/lib/python3.10/site-packages/nltk/chat/suntsu.py ADDED
@@ -0,0 +1,140 @@
+# Natural Language Toolkit: Sun Tsu-Bot
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Sam Huston 2007
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+"""
+Tsu bot responds to all queries with a Sun Tsu sayings
+
+Quoted from Sun Tsu's The Art of War
+Translated by LIONEL GILES, M.A. 1910
+Hosted by the Gutenberg Project
+https://www.gutenberg.org/
+"""
+
+from nltk.chat.util import Chat, reflections
+
+pairs = (
+    (r"quit", ("Good-bye.", "Plan well", "May victory be your future")),
+    (
+        r"[^\?]*\?",
+        (
+            "Please consider whether you can answer your own question.",
+            "Ask me no questions!",
+        ),
+    ),
+    (
+        r"[0-9]+(.*)",
+        (
+            "It is the rule in war, if our forces are ten to the enemy's one, to surround him; if five to one, to attack him; if twice as numerous, to divide our army into two.",
+            "There are five essentials for victory",
+        ),
+    ),
+    (
+        r"[A-Ca-c](.*)",
+        (
+            "The art of war is of vital importance to the State.",
+            "All warfare is based on deception.",
+            "If your opponent is secure at all points, be prepared for him. If he is in superior strength, evade him.",
+            "If the campaign is protracted, the resources of the State will not be equal to the strain.",
+            "Attack him where he is unprepared, appear where you are not expected.",
+            "There is no instance of a country having benefited from prolonged warfare.",
+        ),
+    ),
+    (
+        r"[D-Fd-f](.*)",
+        (
+            "The skillful soldier does not raise a second levy, neither are his supply-wagons loaded more than twice.",
+            "Bring war material with you from home, but forage on the enemy.",
+            "In war, then, let your great object be victory, not lengthy campaigns.",
+            "To fight and conquer in all your battles is not supreme excellence; supreme excellence consists in breaking the enemy's resistance without fighting.",
+        ),
+    ),
+    (
+        r"[G-Ig-i](.*)",
+        (
+            "Heaven signifies night and day, cold and heat, times and seasons.",
+            "It is the rule in war, if our forces are ten to the enemy's one, to surround him; if five to one, to attack him; if twice as numerous, to divide our army into two.",
+            "The good fighters of old first put themselves beyond the possibility of defeat, and then waited for an opportunity of defeating the enemy.",
+            "One may know how to conquer without being able to do it.",
+        ),
+    ),
+    (
+        r"[J-Lj-l](.*)",
+        (
+            "There are three ways in which a ruler can bring misfortune upon his army.",
+            "By commanding the army to advance or to retreat, being ignorant of the fact that it cannot obey. This is called hobbling the army.",
+            "By attempting to govern an army in the same way as he administers a kingdom, being ignorant of the conditions which obtain in an army. This causes restlessness in the soldier's minds.",
+            "By employing the officers of his army without discrimination, through ignorance of the military principle of adaptation to circumstances. This shakes the confidence of the soldiers.",
+            "There are five essentials for victory",
+            "He will win who knows when to fight and when not to fight.",
+            "He will win who knows how to handle both superior and inferior forces.",
+            "He will win whose army is animated by the same spirit throughout all its ranks.",
+            "He will win who, prepared himself, waits to take the enemy unprepared.",
+            "He will win who has military capacity and is not interfered with by the sovereign.",
+        ),
+    ),
+    (
+        r"[M-Om-o](.*)",
+        (
+            "If you know the enemy and know yourself, you need not fear the result of a hundred battles.",
+            "If you know yourself but not the enemy, for every victory gained you will also suffer a defeat.",
+            "If you know neither the enemy nor yourself, you will succumb in every battle.",
+            "The control of a large force is the same principle as the control of a few men: it is merely a question of dividing up their numbers.",
+        ),
+    ),
+    (
+        r"[P-Rp-r](.*)",
+        (
+            "Security against defeat implies defensive tactics; ability to defeat the enemy means taking the offensive.",
+            "Standing on the defensive indicates insufficient strength; attacking, a superabundance of strength.",
+            "He wins his battles by making no mistakes. Making no mistakes is what establishes the certainty of victory, for it means conquering an enemy that is already defeated.",
+            "A victorious army opposed to a routed one, is as a pound's weight placed in the scale against a single grain.",
+            "The onrush of a conquering force is like the bursting of pent-up waters into a chasm a thousand fathoms deep.",
+        ),
+    ),
+    (
+        r"[S-Us-u](.*)",
+        (
+            "What the ancients called a clever fighter is one who not only wins, but excels in winning with ease.",
+            "Hence his victories bring him neither reputation for wisdom nor credit for courage.",
+            "Hence the skillful fighter puts himself into a position which makes defeat impossible, and does not miss the moment for defeating the enemy.",
+            "In war the victorious strategist only seeks battle after the victory has been won, whereas he who is destined to defeat first fights and afterwards looks for victory.",
+            "There are not more than five musical notes, yet the combinations of these five give rise to more melodies than can ever be heard.",
+            "Appear at points which the enemy must hasten to defend; march swiftly to places where you are not expected.",
+        ),
+    ),
+    (
+        r"[V-Zv-z](.*)",
+        (
+            "It is a matter of life and death, a road either to safety or to ruin.",
+            "Hold out baits to entice the enemy. Feign disorder, and crush him.",
+            "All men can see the tactics whereby I conquer, but what none can see is the strategy out of which victory is evolved.",
+            "Do not repeat the tactics which have gained you one victory, but let your methods be regulated by the infinite variety of circumstances.",
+            "So in war, the way is to avoid what is strong and to strike at what is weak.",
+            "Just as water retains no constant shape, so in warfare there are no constant conditions.",
+        ),
+    ),
+    (r"(.*)", ("Your statement insults me.", "")),
+)
+
+suntsu_chatbot = Chat(pairs, reflections)
+
+
+def suntsu_chat():
+    print("Talk to the program by typing in plain English, using normal upper-")
+    print('and lower-case letters and punctuation. Enter "quit" when done.')
+    print("=" * 72)
+    print("You seek enlightenment?")
+
+    suntsu_chatbot.converse()
+
+
+def demo():
+    suntsu_chat()
+
+
+if __name__ == "__main__":
+    demo()
env-llmeval/lib/python3.10/site-packages/nltk/chat/util.py ADDED
@@ -0,0 +1,124 @@
+# Natural Language Toolkit: Chatbot Utilities
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Authors: Steven Bird <[email protected]>
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+# Based on an Eliza implementation by Joe Strout <[email protected]>,
+# Jeff Epler <[email protected]> and Jez Higgins <[email protected]>.
+
+import random
+import re
+
+reflections = {
+    "i am": "you are",
+    "i was": "you were",
+    "i": "you",
+    "i'm": "you are",
+    "i'd": "you would",
+    "i've": "you have",
+    "i'll": "you will",
+    "my": "your",
+    "you are": "I am",
+    "you were": "I was",
+    "you've": "I have",
+    "you'll": "I will",
+    "your": "my",
+    "yours": "mine",
+    "you": "me",
+    "me": "you",
+}
+
+
+class Chat:
+    def __init__(self, pairs, reflections={}):
+        """
+        Initialize the chatbot. Pairs is a list of patterns and responses. Each
+        pattern is a regular expression matching the user's statement or question,
+        e.g. r'I like (.*)'. For each such pattern a list of possible responses
+        is given, e.g. ['Why do you like %1', 'Did you ever dislike %1']. Material
+        which is matched by parenthesized sections of the patterns (e.g. .*) is mapped to
+        the numbered positions in the responses, e.g. %1.
+
+        :type pairs: list of tuple
+        :param pairs: The patterns and responses
+        :type reflections: dict
+        :param reflections: A mapping between first and second person expressions
+        :rtype: None
+        """
+
+        self._pairs = [(re.compile(x, re.IGNORECASE), y) for (x, y) in pairs]
+        self._reflections = reflections
+        self._regex = self._compile_reflections()
+
+    def _compile_reflections(self):
+        sorted_refl = sorted(self._reflections, key=len, reverse=True)
+        return re.compile(
+            r"\b({})\b".format("|".join(map(re.escape, sorted_refl))), re.IGNORECASE
+        )
+
+    def _substitute(self, str):
+        """
+        Substitute words in the string, according to the specified reflections,
+        e.g. "I'm" -> "you are"
+
+        :type str: str
+        :param str: The string to be mapped
+        :rtype: str
+        """
+
+        return self._regex.sub(
+            lambda mo: self._reflections[mo.string[mo.start() : mo.end()]], str.lower()
+        )
+
+    def _wildcards(self, response, match):
+        pos = response.find("%")
+        while pos >= 0:
+            num = int(response[pos + 1 : pos + 2])
+            response = (
+                response[:pos]
+                + self._substitute(match.group(num))
+                + response[pos + 2 :]
+            )
+            pos = response.find("%")
+        return response
+
+    def respond(self, str):
+        """
+        Generate a response to the user input.
+
+        :type str: str
+        :param str: The string to be mapped
+        :rtype: str
+        """
+
+        # check each pattern
+        for (pattern, response) in self._pairs:
+            match = pattern.match(str)
+
+            # did the pattern match?
+            if match:
+                resp = random.choice(response)  # pick a random response
+                resp = self._wildcards(resp, match)  # process wildcards
+
+                # fix munged punctuation at the end
+                if resp[-2:] == "?.":
+                    resp = resp[:-2] + "."
+                if resp[-2:] == "??":
+                    resp = resp[:-2] + "?"
+                return resp
+
+    # Hold a conversation with a chatbot
+    def converse(self, quit="quit"):
+        user_input = ""
+        while user_input != quit:
+            user_input = quit
+            try:
+                user_input = input(">")
+            except EOFError:
+                print(user_input)
+            if user_input:
+                while user_input[-1] in "!.":
+                    user_input = user_input[:-1]
+                print(self.respond(user_input))
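Note: Chat is fully generic: any (pattern, responses) table plus a reflection dict yields a new bot, which is exactly how the eliza, iesha, rude, suntsu and zen modules in this commit are built. A minimal sketch with a hypothetical three-rule table (the patterns below are illustrative, not part of NLTK):

    from nltk.chat.util import Chat, reflections

    # %1 is replaced by the first capture group after first-/second-person
    # words are swapped via the reflections map ("my" -> "your", etc.).
    my_pairs = (
        (r"I like (.*)", ("Why do you like %1?", "Since when do you like %1?")),
        (r"quit", ("Bye!",)),
        (r"(.*)", ("Tell me more.",)),
    )

    bot = Chat(my_pairs, reflections)
    print(bot.respond("I like my cat"))  # e.g. "Why do you like your cat?"
    # bot.converse()  # interactive loop; type "quit" to stop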
env-llmeval/lib/python3.10/site-packages/nltk/chat/zen.py ADDED
@@ -0,0 +1,329 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Zen Chatbot
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Amy Holland <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Zen Chatbot talks in gems of Zen wisdom.
10
+
11
+ This is a sample conversation with Zen Chatbot:
12
+ ZC: Welcome, my child.
13
+ me: Good afternoon.
14
+ ZC: Ask the question you have come to ask.
15
+ me: How can I achieve enlightenment?
16
+ ZC: How do you suppose?
17
+ me: Through meditation.
18
+ ZC: Form is emptiness, and emptiness form.
19
+ me: How can I empty my mind of worldly troubles?
20
+ ZC: Will an answer to that really help in your search for enlightenment?
21
+ me: Yes.
22
+ ZC: It is better to be right than to be certain.
23
+ me: I seek truth and wisdom.
24
+ ZC: The search for truth is a long journey.
25
+ me: Are you sure?
26
+ ZC: Maybe sure, maybe not sure.
27
+
28
+
29
+ The chatbot structure is based on that of chat.eliza. Thus, it uses
30
+ a translation table to convert from question to response
31
+ i.e. "I am" --> "you are"
32
+
33
+ Of course, since Zen Chatbot does not understand the meaning of any words,
34
+ responses are very limited. Zen Chatbot will usually answer very vaguely, or
35
+ respond to a question by asking a different question, in much the same way
36
+ as Eliza.
37
+ """
38
+
39
+ from nltk.chat.util import Chat, reflections
40
+
41
+ # responses are matched top to bottom, so non-specific matches occur later
42
+ # for each match, a list of possible responses is provided
43
+ responses = (
44
+ # Zen Chatbot opens with the line "Welcome, my child." The usual
45
+ # response will be a greeting problem: 'good' matches "good morning",
46
+ # "good day" etc, but also "good grief!" and other sentences starting
47
+ # with the word 'good' that may not be a greeting
48
+ (
49
+ r"(hello(.*))|(good [a-zA-Z]+)",
50
+ (
51
+ "The path to enlightenment is often difficult to see.",
52
+ "Greetings. I sense your mind is troubled. Tell me of your troubles.",
53
+ "Ask the question you have come to ask.",
54
+ "Hello. Do you seek englightenment?",
55
+ ),
56
+ ),
57
+ # "I need" and "I want" can be followed by a thing (eg 'help')
58
+ # or an action (eg 'to see you')
59
+ #
60
+ # This is a problem with this style of response -
61
+ # person: "I need you"
62
+ # chatbot: "me can be achieved by hard work and dedication of the mind"
63
+ # i.e. 'you' is not really a thing that can be mapped this way, so this
64
+ # interpretation only makes sense for some inputs
65
+ #
66
+ (
67
+ r"i need (.*)",
68
+ (
69
+ "%1 can be achieved by hard work and dedication of the mind.",
70
+ "%1 is not a need, but a desire of the mind. Clear your mind of such concerns.",
71
+ "Focus your mind on%1, and you will find what you need.",
72
+ ),
73
+ ),
74
+ (
75
+ r"i want (.*)",
76
+ (
77
+ "Desires of the heart will distract you from the path to enlightenment.",
78
+ "Will%1 help you attain enlightenment?",
79
+ "Is%1 a desire of the mind, or of the heart?",
80
+ ),
81
+ ),
82
+ # why questions are separated into three types:
83
+ # "why..I" e.g. "why am I here?" "Why do I like cake?"
84
+ # "why..you" e.g. "why are you here?" "Why won't you tell me?"
85
+ # "why..." e.g. "Why is the sky blue?"
86
+ # problems:
87
+ # person: "Why can't you tell me?"
88
+ # chatbot: "Are you sure I tell you?"
89
+ # - this style works for positives (e.g. "why do you like cake?")
90
+ # but does not work for negatives (e.g. "why don't you like cake?")
91
+ (r"why (.*) i (.*)\?", ("You%1%2?", "Perhaps you only think you%1%2")),
92
+ (r"why (.*) you(.*)\?", ("Why%1 you%2?", "%2 I%1", "Are you sure I%2?")),
93
+ (r"why (.*)\?", ("I cannot tell you why%1.", "Why do you think %1?")),
94
+ # e.g. "are you listening?", "are you a duck"
95
+ (
96
+ r"are you (.*)\?",
97
+ ("Maybe%1, maybe not%1.", "Whether I am%1 or not is God's business."),
98
+ ),
99
+ # e.g. "am I a duck?", "am I going to die?"
100
+ (
101
+ r"am i (.*)\?",
102
+ ("Perhaps%1, perhaps not%1.", "Whether you are%1 or not is not for me to say."),
103
+ ),
104
+ # what questions, e.g. "what time is it?"
105
+ # problems:
106
+ # person: "What do you want?"
107
+ # chatbot: "Seek truth, not what do me want."
108
+ (r"what (.*)\?", ("Seek truth, not what%1.", "What%1 should not concern you.")),
109
+ # how questions, e.g. "how do you do?"
110
+ (
111
+ r"how (.*)\?",
112
+ (
113
+ "How do you suppose?",
114
+ "Will an answer to that really help in your search for enlightenment?",
115
+ "Ask yourself not how, but why.",
116
+ ),
117
+ ),
118
+ # can questions, e.g. "can you run?", "can you come over here please?"
119
+ (
120
+ r"can you (.*)\?",
121
+ (
122
+ "I probably can, but I may not.",
123
+ "Maybe I can%1, and maybe I cannot.",
124
+ "I can do all, and I can do nothing.",
125
+ ),
126
+ ),
127
+ # can questions, e.g. "can I have some cake?", "can I know truth?"
128
+ (
129
+ r"can i (.*)\?",
130
+ (
131
+ "You can%1 if you believe you can%1, and have a pure spirit.",
132
+ "Seek truth and you will know if you can%1.",
133
+ ),
134
+ ),
135
+ # e.g. "It is raining" - implies the speaker is certain of a fact
136
+ (
137
+ r"it is (.*)",
138
+ (
139
+ "How can you be certain that%1, when you do not even know yourself?",
140
+ "Whether it is%1 or not does not change the way the world is.",
141
+ ),
142
+ ),
143
+ # e.g. "is there a doctor in the house?"
144
+ (
145
+ r"is there (.*)\?",
146
+ ("There is%1 if you believe there is.", "It is possible that there is%1."),
147
+ ),
148
+ # e.g. "is it possible?", "is this true?"
149
+ (r"is(.*)\?", ("%1 is not relevant.", "Does this matter?")),
150
+ # non-specific question
151
+ (
152
+ r"(.*)\?",
153
+ (
154
+ "Do you think %1?",
155
+ "You seek the truth. Does the truth seek you?",
156
+ "If you intentionally pursue the answers to your questions, the answers become hard to see.",
157
+ "The answer to your question cannot be told. It must be experienced.",
158
+ ),
159
+ ),
160
+ # expression of hate of form "I hate you" or "Kelly hates cheese"
161
+ (
162
+ r"(.*) (hate[s]?)|(dislike[s]?)|(don\'t like)(.*)",
163
+ (
164
+ "Perhaps it is not about hating %2, but about hate from within.",
165
+ "Weeds only grow when we dislike them",
166
+ "Hate is a very strong emotion.",
167
+ ),
168
+ ),
169
+ # statement containing the word 'truth'
170
+ (
171
+ r"(.*) truth(.*)",
172
+ (
173
+ "Seek truth, and truth will seek you.",
174
+ "Remember, it is not the spoon which bends - only yourself.",
175
+ "The search for truth is a long journey.",
176
+ ),
177
+ ),
178
+ # desire to do an action
179
+ # e.g. "I want to go shopping"
180
+ (
181
+ r"i want to (.*)",
182
+ ("You may %1 if your heart truly desires to.", "You may have to %1."),
183
+ ),
184
+ # desire for an object
185
+ # e.g. "I want a pony"
186
+ (
187
+ r"i want (.*)",
188
+ (
189
+ "Does your heart truly desire %1?",
190
+ "Is this a desire of the heart, or of the mind?",
191
+ ),
192
+ ),
193
+ # e.g. "I can't wait" or "I can't do this"
194
+ (
195
+ r"i can\'t (.*)",
196
+ (
197
+ "What we can and can't do is a limitation of the mind.",
198
+ "There are limitations of the body, and limitations of the mind.",
199
+ "Have you tried to%1 with a clear mind?",
200
+ ),
201
+ ),
202
+ # "I think.." indicates uncertainty. e.g. "I think so."
203
+ # problem: exceptions...
204
+ # e.g. "I think, therefore I am"
205
+ (
206
+ r"i think (.*)",
207
+ (
208
+ "Uncertainty in an uncertain world.",
209
+ "Indeed, how can we be certain of anything in such uncertain times.",
210
+ "Are you not, in fact, certain that%1?",
211
+ ),
212
+ ),
213
+ # "I feel...emotions/sick/light-headed..."
214
+ (
215
+ r"i feel (.*)",
216
+ (
217
+ "Your body and your emotions are both symptoms of your mind."
218
+ "What do you believe is the root of such feelings?",
219
+ "Feeling%1 can be a sign of your state-of-mind.",
220
+ ),
221
+ ),
222
+ # exclamation mark indicating emotion
223
+ # e.g. "Wow!" or "No!"
224
+ (
225
+ r"(.*)!",
226
+ (
227
+ "I sense that you are feeling emotional today.",
228
+ "You need to calm your emotions.",
229
+ ),
230
+ ),
231
+ # because [statement]
232
+ # e.g. "because I said so"
233
+ (
234
+ r"because (.*)",
235
+ (
236
+ "Does knowning the reasons behind things help you to understand"
237
+ " the things themselves?",
238
+ "If%1, what else must be true?",
239
+ ),
240
+ ),
241
+ # yes or no - raise an issue of certainty/correctness
242
+ (
243
+ r"(yes)|(no)",
244
+ (
245
+ "Is there certainty in an uncertain world?",
246
+ "It is better to be right than to be certain.",
247
+ ),
248
+ ),
249
+ # sentence containing word 'love'
250
+ (
251
+ r"(.*)love(.*)",
252
+ (
253
+ "Think of the trees: they let the birds perch and fly with no intention to call them when they come, and no longing for their return when they fly away. Let your heart be like the trees.",
254
+ "Free love!",
255
+ ),
256
+ ),
257
+ # sentence containing word 'understand' - r
258
+ (
259
+ r"(.*)understand(.*)",
260
+ (
261
+ "If you understand, things are just as they are;"
262
+ " if you do not understand, things are just as they are.",
263
+ "Imagination is more important than knowledge.",
264
+ ),
265
+ ),
266
+ # 'I', 'me', 'my' - person is talking about themself.
267
+ # word boundaries keep this from matching inside words such as 'Thyme' or 'Irish'
268
+ (
269
+ r"(.*)(me )|( me)|(my)|(mine)|(i)(.*)",
270
+ (
271
+ "'I', 'me', 'my'... these are selfish expressions.",
272
+ "Have you ever considered that you might be a selfish person?",
273
+ "Try to consider others, not just yourself.",
274
+ "Think not just of yourself, but of others.",
275
+ ),
276
+ ),
277
+ # 'you' starting a sentence
278
+ # e.g. "you stink!"
279
+ (
280
+ r"you (.*)",
281
+ ("My path is not of concern to you.", "I am but one, and you but one more."),
282
+ ),
283
+ # say goodbye with some extra Zen wisdom.
284
+ (
285
+ r"exit",
286
+ (
287
+ "Farewell. The obstacle is the path.",
288
+ "Farewell. Life is a journey, not a destination.",
289
+ "Good bye. We are cups, constantly and quietly being filled."
290
+ "\nThe trick is knowning how to tip ourselves over and let the beautiful stuff out.",
291
+ ),
292
+ ),
293
+ # fall through case -
294
+ # when stumped, respond with generic zen wisdom
295
+ #
296
+ (
297
+ r"(.*)",
298
+ (
299
+ "When you're enlightened, every word is wisdom.",
300
+ "Random talk is useless.",
301
+ "The reverse side also has a reverse side.",
302
+ "Form is emptiness, and emptiness is form.",
303
+ "I pour out a cup of water. Is the cup empty?",
304
+ ),
305
+ ),
306
+ )
307
+
308
+ zen_chatbot = Chat(responses, reflections)
309
+
310
+
311
+ def zen_chat():
312
+ print("*" * 75)
313
+ print("Zen Chatbot!".center(75))
314
+ print("*" * 75)
315
+ print('"Look beyond mere words and letters - look into your mind"'.center(75))
316
+ print("* Talk your way to truth with Zen Chatbot.")
317
+ print("* Type 'quit' when you have had enough.")
318
+ print("*" * 75)
319
+ print("Welcome, my child.")
320
+
321
+ zen_chatbot.converse()
322
+
323
+
324
+ def demo():
325
+ zen_chat()
326
+
327
+
328
+ if __name__ == "__main__":
329
+ demo()
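For context on how the response table above is consumed: ``Chat`` replaces each ``%N`` placeholder with the N-th regex group, after mapping pronouns through the standard ``reflections`` table. A minimal sketch, using a made-up rule in the same (pattern, responses) shape as above:

    from nltk.chat.util import Chat, reflections

    # one illustrative rule; not part of the zen.py table
    rules = ((r"i want (.*)", ("Does your heart truly desire %1?",)),)
    bot = Chat(rules, reflections)
    # "my" is reflected to "your" before being substituted into %1
    print(bot.respond("I want my freedom"))
    # -> Does your heart truly desire your freedom?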
env-llmeval/lib/python3.10/site-packages/nltk/cli.py ADDED
@@ -0,0 +1,55 @@
1
+ # Natural Language Toolkit: NLTK Command-Line Interface
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # URL: <https://www.nltk.org/>
5
+ # For license information, see LICENSE.TXT
6
+
7
+
8
+ import click
9
+ from tqdm import tqdm
10
+
11
+ from nltk import word_tokenize
12
+ from nltk.util import parallelize_preprocess
13
+
14
+ CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
15
+
16
+
17
+ @click.group(context_settings=CONTEXT_SETTINGS)
18
+ @click.version_option()
19
+ def cli():
20
+ pass
21
+
22
+
23
+ @cli.command("tokenize")
24
+ @click.option(
25
+ "--language",
26
+ "-l",
27
+ default="en",
28
+ help="The language for the Punkt sentence tokenization.",
29
+ )
30
+ @click.option(
31
+ "--preserve-line",
32
+ "-l",
33
+ default=True,
34
+ is_flag=True,
35
+ help="An option to keep the preserve the sentence and not sentence tokenize it.",
36
+ )
37
+ @click.option("--processes", "-j", default=1, help="No. of processes.")
38
+ @click.option("--encoding", "-e", default="utf8", help="Specify encoding of file.")
39
+ @click.option(
40
+ "--delimiter", "-d", default=" ", help="Specify delimiter to join the tokens."
41
+ )
42
+ def tokenize_file(language, preserve_line, processes, encoding, delimiter):
43
+ """This command tokenizes text stream using nltk.word_tokenize"""
44
+ with click.get_text_stream("stdin", encoding=encoding) as fin:
45
+ with click.get_text_stream("stdout", encoding=encoding) as fout:
46
+ # If it's single process, joblib parallelization is slower,
47
+ # so just process line by line normally.
48
+ if processes == 1:
49
+ for line in tqdm(fin.readlines()):
50
+ print(delimiter.join(word_tokenize(line, language, preserve_line)), end="\n", file=fout)
51
+ else:
52
+ for outline in parallelize_preprocess(
53
+ word_tokenize, fin.readlines(), processes, progress_bar=True
54
+ ):
55
+ print(delimiter.join(outline), end="\n", file=fout)
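A quick sketch of driving the command above from a shell. This assumes no console script is installed, so the click group is invoked through a Python one-liner; the sample sentence is made up:

    echo 'Good muffins cost $3.88 in New York.' \
        | python -c "from nltk.cli import cli; cli()" tokenize --processes 1
    # -> Good muffins cost $ 3.88 in New York .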
env-llmeval/lib/python3.10/site-packages/nltk/collections.py ADDED
@@ -0,0 +1,661 @@
1
+ # Natural Language Toolkit: Collections
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import bisect
9
+
10
+ # this unused import is for python 2.7
11
+ from collections import Counter, defaultdict, deque
12
+ from functools import total_ordering
13
+ from itertools import chain, islice
14
+
15
+ from nltk.internals import raise_unorderable_types, slice_bounds
16
+
17
+ ##########################################################################
18
+ # Ordered Dictionary
19
+ ##########################################################################
20
+
21
+
22
+ class OrderedDict(dict):
23
+ def __init__(self, data=None, **kwargs):
24
+ self._keys = self.keys(data, kwargs.get("keys"))
25
+ self._default_factory = kwargs.get("default_factory")
26
+ if data is None:
27
+ dict.__init__(self)
28
+ else:
29
+ dict.__init__(self, data)
30
+
31
+ def __delitem__(self, key):
32
+ dict.__delitem__(self, key)
33
+ self._keys.remove(key)
34
+
35
+ def __getitem__(self, key):
36
+ try:
37
+ return dict.__getitem__(self, key)
38
+ except KeyError:
39
+ return self.__missing__(key)
40
+
41
+ def __iter__(self):
42
+ return (key for key in self.keys())
43
+
44
+ def __missing__(self, key):
45
+ if not self._default_factory and key not in self._keys:
46
+ raise KeyError()
47
+ return self._default_factory()
48
+
49
+ def __setitem__(self, key, item):
50
+ dict.__setitem__(self, key, item)
51
+ if key not in self._keys:
52
+ self._keys.append(key)
53
+
54
+ def clear(self):
55
+ dict.clear(self)
56
+ self._keys.clear()
57
+
58
+ def copy(self):
59
+ d = dict.copy(self)
60
+ d._keys = self._keys
61
+ return d
62
+
63
+ def items(self):
64
+ # returns iterator under python 3 and list under python 2
65
+ return zip(self.keys(), self.values())
66
+
67
+ def keys(self, data=None, keys=None):
68
+ if data:
69
+ if keys:
70
+ assert isinstance(keys, list)
71
+ assert len(data) == len(keys)
72
+ return keys
73
+ else:
74
+ assert (
75
+ isinstance(data, dict)
76
+ or isinstance(data, OrderedDict)
77
+ or isinstance(data, list)
78
+ )
79
+ if isinstance(data, dict) or isinstance(data, OrderedDict):
80
+ return data.keys()
81
+ elif isinstance(data, list):
82
+ return [key for (key, value) in data]
83
+ elif "_keys" in self.__dict__:
84
+ return self._keys
85
+ else:
86
+ return []
87
+
88
+ def popitem(self):
89
+ if not self._keys:
90
+ raise KeyError()
91
+
92
+ key = self._keys.pop()
93
+ value = self[key]
94
+ del self[key]
95
+ return (key, value)
96
+
97
+ def setdefault(self, key, failobj=None):
98
+ dict.setdefault(self, key, failobj)
99
+ if key not in self._keys:
100
+ self._keys.append(key)
101
+
102
+ def update(self, data):
103
+ dict.update(self, data)
104
+ for key in self.keys(data):
105
+ if key not in self._keys:
106
+ self._keys.append(key)
107
+
108
+ def values(self):
109
+ # returns iterator under python 3
110
+ return map(self.get, self._keys)
111
+
112
+
113
+ ######################################################################
114
+ # Lazy Sequences
115
+ ######################################################################
116
+
117
+
118
+ @total_ordering
119
+ class AbstractLazySequence:
120
+ """
121
+ An abstract base class for read-only sequences whose values are
122
+ computed as needed. Lazy sequences act like tuples -- they can be
123
+ indexed, sliced, and iterated over; but they may not be modified.
124
+
125
+ The most common application of lazy sequences in NLTK is for
126
+ corpus view objects, which provide access to the contents of a
127
+ corpus without loading the entire corpus into memory, by loading
128
+ pieces of the corpus from disk as needed.
129
+
130
+ The result of modifying a mutable element of a lazy sequence is
131
+ undefined. In particular, the modifications made to the element
132
+ may or may not persist, depending on whether and when the lazy
133
+ sequence caches that element's value or reconstructs it from
134
+ scratch.
135
+
136
+ Subclasses are required to define two methods: ``__len__()``
137
+ and ``iterate_from()``.
138
+ """
139
+
140
+ def __len__(self):
141
+ """
142
+ Return the number of tokens in the corpus file underlying this
143
+ corpus view.
144
+ """
145
+ raise NotImplementedError("should be implemented by subclass")
146
+
147
+ def iterate_from(self, start):
148
+ """
149
+ Return an iterator that generates the tokens in the corpus
150
+ file underlying this corpus view, starting at the token number
151
+ ``start``. If ``start>=len(self)``, then this iterator will
152
+ generate no tokens.
153
+ """
154
+ raise NotImplementedError("should be implemented by subclass")
155
+
156
+ def __getitem__(self, i):
157
+ """
158
+ Return the *i* th token in the corpus file underlying this
159
+ corpus view. Negative indices and spans are both supported.
160
+ """
161
+ if isinstance(i, slice):
162
+ start, stop = slice_bounds(self, i)
163
+ return LazySubsequence(self, start, stop)
164
+ else:
165
+ # Handle negative indices
166
+ if i < 0:
167
+ i += len(self)
168
+ if i < 0:
169
+ raise IndexError("index out of range")
170
+ # Use iterate_from to extract it.
171
+ try:
172
+ return next(self.iterate_from(i))
173
+ except StopIteration as e:
174
+ raise IndexError("index out of range") from e
175
+
176
+ def __iter__(self):
177
+ """Return an iterator that generates the tokens in the corpus
178
+ file underlying this corpus view."""
179
+ return self.iterate_from(0)
180
+
181
+ def count(self, value):
182
+ """Return the number of times this list contains ``value``."""
183
+ return sum(1 for elt in self if elt == value)
184
+
185
+ def index(self, value, start=None, stop=None):
186
+ """Return the index of the first occurrence of ``value`` in this
187
+ list that is greater than or equal to ``start`` and less than
188
+ ``stop``. Negative start and stop values are treated like negative
189
+ slice bounds -- i.e., they count from the end of the list."""
190
+ start, stop = slice_bounds(self, slice(start, stop))
191
+ for i, elt in enumerate(islice(self, start, stop)):
192
+ if elt == value:
193
+ return i + start
194
+ raise ValueError("index(x): x not in list")
195
+
196
+ def __contains__(self, value):
197
+ """Return true if this list contains ``value``."""
198
+ return bool(self.count(value))
199
+
200
+ def __add__(self, other):
201
+ """Return a list concatenating self with other."""
202
+ return LazyConcatenation([self, other])
203
+
204
+ def __radd__(self, other):
205
+ """Return a list concatenating other with self."""
206
+ return LazyConcatenation([other, self])
207
+
208
+ def __mul__(self, count):
209
+ """Return a list concatenating self with itself ``count`` times."""
210
+ return LazyConcatenation([self] * count)
211
+
212
+ def __rmul__(self, count):
213
+ """Return a list concatenating self with itself ``count`` times."""
214
+ return LazyConcatenation([self] * count)
215
+
216
+ _MAX_REPR_SIZE = 60
217
+
218
+ def __repr__(self):
219
+ """
220
+ Return a string representation for this corpus view that is
221
+ similar to a list's representation; but if it would be more
222
+ than 60 characters long, it is truncated.
223
+ """
224
+ pieces = []
225
+ length = 5
226
+ for elt in self:
227
+ pieces.append(repr(elt))
228
+ length += len(pieces[-1]) + 2
229
+ if length > self._MAX_REPR_SIZE and len(pieces) > 2:
230
+ return "[%s, ...]" % ", ".join(pieces[:-1])
231
+ return "[%s]" % ", ".join(pieces)
232
+
233
+ def __eq__(self, other):
234
+ return type(self) == type(other) and list(self) == list(other)
235
+
236
+ def __ne__(self, other):
237
+ return not self == other
238
+
239
+ def __lt__(self, other):
240
+ if type(other) != type(self):
241
+ raise_unorderable_types("<", self, other)
242
+ return list(self) < list(other)
243
+
244
+ def __hash__(self):
245
+ """
246
+ :raise ValueError: Corpus view objects are unhashable.
247
+ """
248
+ raise ValueError("%s objects are unhashable" % self.__class__.__name__)
249
+
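As the docstring above notes, a concrete subclass only has to supply ``__len__`` and ``iterate_from``; indexing, slicing, and iteration all fall out of the base class. A minimal hypothetical subclass (``RangeSequence`` is illustration only, not part of the module):

    class RangeSequence(AbstractLazySequence):
        """A toy lazy sequence over 0..n-1."""

        def __init__(self, n):
            self._n = n

        def __len__(self):
            return self._n

        def iterate_from(self, start):
            return iter(range(start, self._n))

    seq = RangeSequence(10)
    seq[3], seq[-1], list(seq[2:5])  # -> (3, 9, [2, 3, 4])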
250
+
251
+ class LazySubsequence(AbstractLazySequence):
252
+ """
253
+ A subsequence produced by slicing a lazy sequence. This slice
254
+ keeps a reference to its source sequence, and generates its values
255
+ by looking them up in the source sequence.
256
+ """
257
+
258
+ MIN_SIZE = 100
259
+ """
260
+ The minimum size for which lazy slices should be created. If
261
+ ``LazySubsequence()`` is called with a subsequence that is
262
+ shorter than ``MIN_SIZE``, then a tuple will be returned instead.
263
+ """
264
+
265
+ def __new__(cls, source, start, stop):
266
+ """
267
+ Construct a new slice from a given underlying sequence. The
268
+ ``start`` and ``stop`` indices should be absolute indices --
269
+ i.e., they should not be negative (for indexing from the back
270
+ of a list) or greater than the length of ``source``.
271
+ """
272
+ # If the slice is small enough, just use a tuple.
273
+ if stop - start < cls.MIN_SIZE:
274
+ return list(islice(source.iterate_from(start), stop - start))
275
+ else:
276
+ return object.__new__(cls)
277
+
278
+ def __init__(self, source, start, stop):
279
+ self._source = source
280
+ self._start = start
281
+ self._stop = stop
282
+
283
+ def __len__(self):
284
+ return self._stop - self._start
285
+
286
+ def iterate_from(self, start):
287
+ return islice(
288
+ self._source.iterate_from(start + self._start), max(0, len(self) - start)
289
+ )
290
+
291
+
292
+ class LazyConcatenation(AbstractLazySequence):
293
+ """
294
+ A lazy sequence formed by concatenating a list of lists. This
295
+ underlying list of lists may itself be lazy. ``LazyConcatenation``
296
+ maintains an index that it uses to keep track of the relationship
297
+ between offsets in the concatenated lists and offsets in the
298
+ sublists.
299
+ """
300
+
301
+ def __init__(self, list_of_lists):
302
+ self._list = list_of_lists
303
+ self._offsets = [0]
304
+
305
+ def __len__(self):
306
+ if len(self._offsets) <= len(self._list):
307
+ for _ in self.iterate_from(self._offsets[-1]):
308
+ pass
309
+ return self._offsets[-1]
310
+
311
+ def iterate_from(self, start_index):
312
+ if start_index < self._offsets[-1]:
313
+ sublist_index = bisect.bisect_right(self._offsets, start_index) - 1
314
+ else:
315
+ sublist_index = len(self._offsets) - 1
316
+
317
+ index = self._offsets[sublist_index]
318
+
319
+ # Construct an iterator over the sublists.
320
+ if isinstance(self._list, AbstractLazySequence):
321
+ sublist_iter = self._list.iterate_from(sublist_index)
322
+ else:
323
+ sublist_iter = islice(self._list, sublist_index, None)
324
+
325
+ for sublist in sublist_iter:
326
+ if sublist_index == (len(self._offsets) - 1):
327
+ assert (
328
+ index + len(sublist) >= self._offsets[-1]
329
+ ), "offsets not monotonic increasing!"
330
+ self._offsets.append(index + len(sublist))
331
+ else:
332
+ assert self._offsets[sublist_index + 1] == index + len(
333
+ sublist
334
+ ), "inconsistent list value (num elts)"
335
+
336
+ yield from sublist[max(0, start_index - index) :]
337
+
338
+ index += len(sublist)
339
+ sublist_index += 1
340
+
341
+
342
+ class LazyMap(AbstractLazySequence):
343
+ """
344
+ A lazy sequence whose elements are formed by applying a given
345
+ function to each element in one or more underlying lists. The
346
+ function is applied lazily -- i.e., when you read a value from the
347
+ list, ``LazyMap`` will calculate that value by applying its
348
+ function to the underlying lists' value(s). ``LazyMap`` is
349
+ essentially a lazy version of the Python primitive function
350
+ ``map``. In particular, the following two expressions are
351
+ equivalent:
352
+
353
+ >>> from nltk.collections import LazyMap
354
+ >>> function = str
355
+ >>> sequence = [1,2,3]
356
+ >>> map(function, sequence) # doctest: +SKIP
357
+ ['1', '2', '3']
358
+ >>> list(LazyMap(function, sequence))
359
+ ['1', '2', '3']
360
+
361
+ Like the Python ``map`` primitive, if the source lists do not have
362
+ equal size, then the value None will be supplied for the
363
+ 'missing' elements.
364
+
365
+ Lazy maps can be useful for conserving memory, in cases where
366
+ individual values take up a lot of space. This is especially true
367
+ if the underlying list's values are constructed lazily, as is the
368
+ case with many corpus readers.
369
+
370
+ A typical example of a use case for this class is performing
371
+ feature detection on the tokens in a corpus. Since featuresets
372
+ are encoded as dictionaries, which can take up a lot of memory,
373
+ using a ``LazyMap`` can significantly reduce memory usage when
374
+ training and running classifiers.
375
+ """
376
+
377
+ def __init__(self, function, *lists, **config):
378
+ """
379
+ :param function: The function that should be applied to
380
+ elements of ``lists``. It should take as many arguments
381
+ as there are ``lists``.
382
+ :param lists: The underlying lists.
383
+ :param cache_size: Determines the size of the cache used
384
+ by this lazy map. (default=5)
385
+ """
386
+ if not lists:
387
+ raise TypeError("LazyMap requires at least two args")
388
+
389
+ self._lists = lists
390
+ self._func = function
391
+ self._cache_size = config.get("cache_size", 5)
392
+ self._cache = {} if self._cache_size > 0 else None
393
+
394
+ # If you just take bool() of sum() here _all_lazy will be true just
395
+ # in case n >= 1 list is an AbstractLazySequence. Presumably this
396
+ # isn't what's intended.
397
+ self._all_lazy = sum(
398
+ isinstance(lst, AbstractLazySequence) for lst in lists
399
+ ) == len(lists)
400
+
401
+ def iterate_from(self, index):
402
+ # Special case: one lazy sublist
403
+ if len(self._lists) == 1 and self._all_lazy:
404
+ for value in self._lists[0].iterate_from(index):
405
+ yield self._func(value)
406
+ return
407
+
408
+ # Special case: one non-lazy sublist
409
+ elif len(self._lists) == 1:
410
+ while True:
411
+ try:
412
+ yield self._func(self._lists[0][index])
413
+ except IndexError:
414
+ return
415
+ index += 1
416
+
417
+ # Special case: n lazy sublists
418
+ elif self._all_lazy:
419
+ iterators = [lst.iterate_from(index) for lst in self._lists]
420
+ while True:
421
+ elements = []
422
+ for iterator in iterators:
423
+ try:
424
+ elements.append(next(iterator))
425
+ except StopIteration: # this shorter list is exhausted; pad with None
426
+ elements.append(None)
427
+ if elements == [None] * len(self._lists):
428
+ return
429
+ yield self._func(*elements)
430
+ index += 1
431
+
432
+ # general case
433
+ else:
434
+ while True:
435
+ try:
436
+ elements = [lst[index] for lst in self._lists]
437
+ except IndexError:
438
+ elements = [None] * len(self._lists)
439
+ for i, lst in enumerate(self._lists):
440
+ try:
441
+ elements[i] = lst[index]
442
+ except IndexError:
443
+ pass
444
+ if elements == [None] * len(self._lists):
445
+ return
446
+ yield self._func(*elements)
447
+ index += 1
448
+
449
+ def __getitem__(self, index):
450
+ if isinstance(index, slice):
451
+ sliced_lists = [lst[index] for lst in self._lists]
452
+ return LazyMap(self._func, *sliced_lists)
453
+ else:
454
+ # Handle negative indices
455
+ if index < 0:
456
+ index += len(self)
457
+ if index < 0:
458
+ raise IndexError("index out of range")
459
+ # Check the cache
460
+ if self._cache is not None and index in self._cache:
461
+ return self._cache[index]
462
+ # Calculate the value
463
+ try:
464
+ val = next(self.iterate_from(index))
465
+ except StopIteration as e:
466
+ raise IndexError("index out of range") from e
467
+ # Update the cache
468
+ if self._cache is not None:
469
+ if len(self._cache) > self._cache_size:
470
+ self._cache.popitem() # discard random entry
471
+ self._cache[index] = val
472
+ # Return the value
473
+ return val
474
+
475
+ def __len__(self):
476
+ return max(len(lst) for lst in self._lists)
477
+
478
+
479
+ class LazyZip(LazyMap):
480
+ """
481
+ A lazy sequence whose elements are tuples, each containing the i-th
482
+ element from each of the argument sequences. The returned list is
483
+ truncated in length to the length of the shortest argument sequence. The
484
+ tuples are constructed lazily -- i.e., when you read a value from the
485
+ list, ``LazyZip`` will calculate that value by forming a tuple from
486
+ the i-th element of each of the argument sequences.
487
+
488
+ ``LazyZip`` is essentially a lazy version of the Python primitive function
489
+ ``zip``. In particular, an evaluated LazyZip is equivalent to a zip:
490
+
491
+ >>> from nltk.collections import LazyZip
492
+ >>> sequence1, sequence2 = [1, 2, 3], ['a', 'b', 'c']
493
+ >>> zip(sequence1, sequence2) # doctest: +SKIP
494
+ [(1, 'a'), (2, 'b'), (3, 'c')]
495
+ >>> list(LazyZip(sequence1, sequence2))
496
+ [(1, 'a'), (2, 'b'), (3, 'c')]
497
+ >>> sequences = [sequence1, sequence2, [6,7,8,9]]
498
+ >>> list(zip(*sequences)) == list(LazyZip(*sequences))
499
+ True
500
+
501
+ Lazy zips can be useful for conserving memory in cases where the argument
502
+ sequences are particularly long.
503
+
504
+ A typical example of a use case for this class is combining long sequences
505
+ of gold standard and predicted values in a classification or tagging task
506
+ in order to calculate accuracy. By constructing tuples lazily and
507
+ avoiding the creation of an additional long sequence, memory usage can be
508
+ significantly reduced.
509
+ """
510
+
511
+ def __init__(self, *lists):
512
+ """
513
+ :param lists: the underlying lists
514
+ :type lists: list(list)
515
+ """
516
+ LazyMap.__init__(self, lambda *elts: elts, *lists)
517
+
518
+ def iterate_from(self, index):
519
+ iterator = LazyMap.iterate_from(self, index)
520
+ while index < len(self):
521
+ yield next(iterator)
522
+ index += 1
523
+ return
524
+
525
+ def __len__(self):
526
+ return min(len(lst) for lst in self._lists)
527
+
528
+
529
+ class LazyEnumerate(LazyZip):
530
+ """
531
+ A lazy sequence whose elements are tuples, each containing a count (from
532
+ zero) and a value yielded by underlying sequence. ``LazyEnumerate`` is
533
+ useful for obtaining an indexed list. The tuples are constructed lazily
534
+ -- i.e., when you read a value from the list, ``LazyEnumerate`` will
535
+ calculate that value by forming a tuple from the count of the i-th
536
+ element and the i-th element of the underlying sequence.
537
+
538
+ ``LazyEnumerate`` is essentially a lazy version of the Python primitive
539
+ function ``enumerate``. In particular, the following two expressions are
540
+ equivalent:
541
+
542
+ >>> from nltk.collections import LazyEnumerate
543
+ >>> sequence = ['first', 'second', 'third']
544
+ >>> list(enumerate(sequence))
545
+ [(0, 'first'), (1, 'second'), (2, 'third')]
546
+ >>> list(LazyEnumerate(sequence))
547
+ [(0, 'first'), (1, 'second'), (2, 'third')]
548
+
549
+ Lazy enumerations can be useful for conserving memory in cases where the
550
+ argument sequences are particularly long.
551
+
552
+ A typical example of a use case for this class is obtaining an indexed
553
+ list for a long sequence of values. By constructing tuples lazily and
554
+ avoiding the creation of an additional long sequence, memory usage can be
555
+ significantly reduced.
556
+ """
557
+
558
+ def __init__(self, lst):
559
+ """
560
+ :param lst: the underlying list
561
+ :type lst: list
562
+ """
563
+ LazyZip.__init__(self, range(len(lst)), lst)
564
+
565
+
566
+ class LazyIteratorList(AbstractLazySequence):
567
+ """
568
+ Wraps an iterator, loading its elements on demand
569
+ and making them subscriptable.
570
+ __repr__ displays only the first few elements.
571
+ """
572
+
573
+ def __init__(self, it, known_len=None):
574
+ self._it = it
575
+ self._len = known_len
576
+ self._cache = []
577
+
578
+ def __len__(self):
579
+ if self._len:
580
+ return self._len
581
+ for _ in self.iterate_from(len(self._cache)):
582
+ pass
583
+ self._len = len(self._cache)
584
+ return self._len
585
+
586
+ def iterate_from(self, start):
587
+ """Create a new iterator over this list starting at the given offset."""
588
+ while len(self._cache) < start:
589
+ v = next(self._it)
590
+ self._cache.append(v)
591
+ i = start
592
+ while i < len(self._cache):
593
+ yield self._cache[i]
594
+ i += 1
595
+ try:
596
+ while True:
597
+ v = next(self._it)
598
+ self._cache.append(v)
599
+ yield v
600
+ except StopIteration:
601
+ pass
602
+
603
+ def __add__(self, other):
604
+ """Return a list concatenating self with other."""
605
+ return type(self)(chain(self, other))
606
+
607
+ def __radd__(self, other):
608
+ """Return a list concatenating other with self."""
609
+ return type(self)(chain(other, self))
610
+
611
+
612
+ ######################################################################
613
+ # Trie Implementation
614
+ ######################################################################
615
+ class Trie(dict):
616
+ """A Trie implementation for strings"""
617
+
618
+ LEAF = True
619
+
620
+ def __init__(self, strings=None):
621
+ """Builds a Trie object, which is built around a ``dict``
622
+
623
+ If ``strings`` is provided, it will add the ``strings``, which
624
+ consist of a ``list`` of ``strings``, to the Trie.
625
+ Otherwise, it'll construct an empty Trie.
626
+
627
+ :param strings: List of strings to insert into the trie
628
+ (Default is ``None``)
629
+ :type strings: list(str)
630
+
631
+ """
632
+ super().__init__()
633
+ if strings:
634
+ for string in strings:
635
+ self.insert(string)
636
+
637
+ def insert(self, string):
638
+ """Inserts ``string`` into the Trie
639
+
640
+ :param string: String to insert into the trie
641
+ :type string: str
642
+
643
+ :Example:
644
+
645
+ >>> from nltk.collections import Trie
646
+ >>> trie = Trie(["abc", "def"])
647
+ >>> expected = {'a': {'b': {'c': {True: None}}}, \
648
+ 'd': {'e': {'f': {True: None}}}}
649
+ >>> trie == expected
650
+ True
651
+
652
+ """
653
+ if len(string):
654
+ self[string[0]].insert(string[1:])
655
+ else:
656
+ # mark that the string is complete
657
+ self[Trie.LEAF] = None
658
+
659
+ def __missing__(self, key):
660
+ self[key] = Trie()
661
+ return self[key]
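The ``Trie`` above only implements insertion; lookups are left to the caller. A hedged sketch of a membership test (``trie_contains`` is hypothetical, not part of the module):

    def trie_contains(trie, string):
        node = trie
        for ch in string:
            # use 'in' rather than indexing: __missing__ would create the child
            if ch not in node:
                return False
            node = node[ch]
        return Trie.LEAF in node

    t = Trie(["abc", "abd"])
    trie_contains(t, "abc"), trie_contains(t, "ab")  # -> (True, False)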
env-llmeval/lib/python3.10/site-packages/nltk/collocations.py ADDED
@@ -0,0 +1,412 @@
1
+ # Natural Language Toolkit: Collocations and Association Measures
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Joel Nothman <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ #
8
+ """
9
+ Tools to identify collocations --- words that often appear consecutively
10
+ --- within corpora. They may also be used to find other associations between
11
+ word occurrences.
12
+ See Manning and Schutze ch. 5 at https://nlp.stanford.edu/fsnlp/promo/colloc.pdf
13
+ and the Text::NSP Perl package at http://ngram.sourceforge.net
14
+
15
+ Finding collocations requires first calculating the frequencies of words and
16
+ their appearance in the context of other words. Often the collection of words
17
+ will then requiring filtering to only retain useful content terms. Each ngram
18
+ of words may then be scored according to some association measure, in order
19
+ to determine the relative likelihood of each ngram being a collocation.
20
+
21
+ The ``BigramCollocationFinder`` and ``TrigramCollocationFinder`` classes provide
22
+ these functionalities, dependent on being provided a function which scores an
23
+ ngram given appropriate frequency counts. A number of standard association
24
+ measures are provided in bigram_measures and trigram_measures.
25
+ """
26
+
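A compact sketch of the count/filter/score pipeline the docstring describes (the token list is made up, and exact rankings depend on the measure chosen):

    from nltk.collocations import BigramCollocationFinder
    from nltk.metrics import BigramAssocMeasures

    tokens = "the quick brown fox saw the quick brown dog".split()
    finder = BigramCollocationFinder.from_words(tokens)
    finder.apply_freq_filter(2)  # drop bigrams seen fewer than twice
    print(finder.nbest(BigramAssocMeasures.pmi, 2))
    # e.g. [('quick', 'brown'), ('the', 'quick')]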
27
+ # Possible TODOs:
28
+ # - consider the distinction between f(x,_) and f(x) and whether our
29
+ # approximation is good enough for fragmented data, and mention it
30
+ # - add a n-gram collocation finder with measures which only utilise n-gram
31
+ # and unigram counts (raw_freq, pmi, student_t)
32
+
33
+ import itertools as _itertools
34
+
35
+ # these two unused imports are referenced in collocations.doctest
36
+ from nltk.metrics import (
37
+ BigramAssocMeasures,
38
+ ContingencyMeasures,
39
+ QuadgramAssocMeasures,
40
+ TrigramAssocMeasures,
41
+ )
42
+ from nltk.metrics.spearman import ranks_from_scores, spearman_correlation
43
+ from nltk.probability import FreqDist
44
+ from nltk.util import ngrams
45
+
46
+
47
+ class AbstractCollocationFinder:
48
+ """
49
+ An abstract base class for collocation finders whose purpose is to
50
+ collect collocation candidate frequencies, filter and rank them.
51
+
52
+ As a minimum, collocation finders require the frequencies of each
53
+ word in a corpus, and the joint frequency of word tuples. This data
54
+ should be provided through nltk.probability.FreqDist objects or an
55
+ identical interface.
56
+ """
57
+
58
+ def __init__(self, word_fd, ngram_fd):
59
+ self.word_fd = word_fd
60
+ self.N = word_fd.N()
61
+ self.ngram_fd = ngram_fd
62
+
63
+ @classmethod
64
+ def _build_new_documents(
65
+ cls, documents, window_size, pad_left=False, pad_right=False, pad_symbol=None
66
+ ):
67
+ """
68
+ Pad the document with the place holder according to the window_size
69
+ """
70
+ padding = (pad_symbol,) * (window_size - 1)
71
+ if pad_right:
72
+ return _itertools.chain.from_iterable(
73
+ _itertools.chain(doc, padding) for doc in documents
74
+ )
75
+ if pad_left:
76
+ return _itertools.chain.from_iterable(
77
+ _itertools.chain(padding, doc) for doc in documents
78
+ )
79
+
80
+ @classmethod
81
+ def from_documents(cls, documents):
82
+ """Constructs a collocation finder given a collection of documents,
83
+ each of which is a list (or iterable) of tokens.
84
+ """
85
+ # return cls.from_words(_itertools.chain(*documents))
86
+ return cls.from_words(
87
+ cls._build_new_documents(documents, cls.default_ws, pad_right=True)
88
+ )
89
+
90
+ @staticmethod
91
+ def _ngram_freqdist(words, n):
92
+ return FreqDist(tuple(words[i : i + n]) for i in range(len(words) - n + 1))
93
+
94
+ def _apply_filter(self, fn=lambda ngram, freq: False):
95
+ """Generic filter removes ngrams from the frequency distribution
96
+ if the function returns True when passed an ngram tuple.
97
+ """
98
+ tmp_ngram = FreqDist()
99
+ for ngram, freq in self.ngram_fd.items():
100
+ if not fn(ngram, freq):
101
+ tmp_ngram[ngram] = freq
102
+ self.ngram_fd = tmp_ngram
103
+
104
+ def apply_freq_filter(self, min_freq):
105
+ """Removes candidate ngrams which have frequency less than min_freq."""
106
+ self._apply_filter(lambda ng, freq: freq < min_freq)
107
+
108
+ def apply_ngram_filter(self, fn):
109
+ """Removes candidate ngrams (w1, w2, ...) where fn(w1, w2, ...)
110
+ evaluates to True.
111
+ """
112
+ self._apply_filter(lambda ng, f: fn(*ng))
113
+
114
+ def apply_word_filter(self, fn):
115
+ """Removes candidate ngrams (w1, w2, ...) where any of (fn(w1), fn(w2),
116
+ ...) evaluates to True.
117
+ """
118
+ self._apply_filter(lambda ng, f: any(fn(w) for w in ng))
119
+
120
+ def _score_ngrams(self, score_fn):
121
+ """Generates of (ngram, score) pairs as determined by the scoring
122
+ function provided.
123
+ """
124
+ for tup in self.ngram_fd:
125
+ score = self.score_ngram(score_fn, *tup)
126
+ if score is not None:
127
+ yield tup, score
128
+
129
+ def score_ngrams(self, score_fn):
130
+ """Returns a sequence of (ngram, score) pairs ordered from highest to
131
+ lowest score, as determined by the scoring function provided.
132
+ """
133
+ return sorted(self._score_ngrams(score_fn), key=lambda t: (-t[1], t[0]))
134
+
135
+ def nbest(self, score_fn, n):
136
+ """Returns the top n ngrams when scored by the given function."""
137
+ return [p for p, s in self.score_ngrams(score_fn)[:n]]
138
+
139
+ def above_score(self, score_fn, min_score):
140
+ """Returns a sequence of ngrams, ordered by decreasing score, whose
141
+ scores each exceed the given minimum score.
142
+ """
143
+ for ngram, score in self.score_ngrams(score_fn):
144
+ if score > min_score:
145
+ yield ngram
146
+ else:
147
+ break
148
+
149
+
150
+ class BigramCollocationFinder(AbstractCollocationFinder):
151
+ """A tool for the finding and ranking of bigram collocations or other
152
+ association measures. It is often useful to use from_words() rather than
153
+ constructing an instance directly.
154
+ """
155
+
156
+ default_ws = 2
157
+
158
+ def __init__(self, word_fd, bigram_fd, window_size=2):
159
+ """Construct a BigramCollocationFinder, given FreqDists for
160
+ appearances of words and (possibly non-contiguous) bigrams.
161
+ """
162
+ AbstractCollocationFinder.__init__(self, word_fd, bigram_fd)
163
+ self.window_size = window_size
164
+
165
+ @classmethod
166
+ def from_words(cls, words, window_size=2):
167
+ """Construct a BigramCollocationFinder for all bigrams in the given
168
+ sequence. When window_size > 2, count non-contiguous bigrams, in the
169
+ style of Church and Hanks's (1990) association ratio.
170
+ """
171
+ wfd = FreqDist()
172
+ bfd = FreqDist()
173
+
174
+ if window_size < 2:
175
+ raise ValueError("Specify window_size at least 2")
176
+
177
+ for window in ngrams(words, window_size, pad_right=True):
178
+ w1 = window[0]
179
+ if w1 is None:
180
+ continue
181
+ wfd[w1] += 1
182
+ for w2 in window[1:]:
183
+ if w2 is not None:
184
+ bfd[(w1, w2)] += 1
185
+ return cls(wfd, bfd, window_size=window_size)
186
+
187
+ def score_ngram(self, score_fn, w1, w2):
188
+ """Returns the score for a given bigram using the given scoring
189
+ function. Following Church and Hanks (1990), counts are scaled by
190
+ a factor of 1/(window_size - 1).
191
+ """
192
+ n_all = self.N
193
+ n_ii = self.ngram_fd[(w1, w2)] / (self.window_size - 1.0)
194
+ if not n_ii:
195
+ return
196
+ n_ix = self.word_fd[w1]
197
+ n_xi = self.word_fd[w2]
198
+ return score_fn(n_ii, (n_ix, n_xi), n_all)
199
+
200
+
201
+ class TrigramCollocationFinder(AbstractCollocationFinder):
202
+ """A tool for the finding and ranking of trigram collocations or other
203
+ association measures. It is often useful to use from_words() rather than
204
+ constructing an instance directly.
205
+ """
206
+
207
+ default_ws = 3
208
+
209
+ def __init__(self, word_fd, bigram_fd, wildcard_fd, trigram_fd):
210
+ """Construct a TrigramCollocationFinder, given FreqDists for
211
+ appearances of words, bigrams, two words with any word between them,
212
+ and trigrams.
213
+ """
214
+ AbstractCollocationFinder.__init__(self, word_fd, trigram_fd)
215
+ self.wildcard_fd = wildcard_fd
216
+ self.bigram_fd = bigram_fd
217
+
218
+ @classmethod
219
+ def from_words(cls, words, window_size=3):
220
+ """Construct a TrigramCollocationFinder for all trigrams in the given
221
+ sequence.
222
+ """
223
+ if window_size < 3:
224
+ raise ValueError("Specify window_size at least 3")
225
+
226
+ wfd = FreqDist()
227
+ wildfd = FreqDist()
228
+ bfd = FreqDist()
229
+ tfd = FreqDist()
230
+ for window in ngrams(words, window_size, pad_right=True):
231
+ w1 = window[0]
232
+ if w1 is None:
233
+ continue
234
+ for w2, w3 in _itertools.combinations(window[1:], 2):
235
+ wfd[w1] += 1
236
+ if w2 is None:
237
+ continue
238
+ bfd[(w1, w2)] += 1
239
+ if w3 is None:
240
+ continue
241
+ wildfd[(w1, w3)] += 1
242
+ tfd[(w1, w2, w3)] += 1
243
+ return cls(wfd, bfd, wildfd, tfd)
244
+
245
+ def bigram_finder(self):
246
+ """Constructs a bigram collocation finder with the bigram and unigram
247
+ data from this finder. Note that this does not include any filtering
248
+ applied to this finder.
249
+ """
250
+ return BigramCollocationFinder(self.word_fd, self.bigram_fd)
251
+
252
+ def score_ngram(self, score_fn, w1, w2, w3):
253
+ """Returns the score for a given trigram using the given scoring
254
+ function.
255
+ """
256
+ n_all = self.N
257
+ n_iii = self.ngram_fd[(w1, w2, w3)]
258
+ if not n_iii:
259
+ return
260
+ n_iix = self.bigram_fd[(w1, w2)]
261
+ n_ixi = self.wildcard_fd[(w1, w3)]
262
+ n_xii = self.bigram_fd[(w2, w3)]
263
+ n_ixx = self.word_fd[w1]
264
+ n_xix = self.word_fd[w2]
265
+ n_xxi = self.word_fd[w3]
266
+ return score_fn(n_iii, (n_iix, n_ixi, n_xii), (n_ixx, n_xix, n_xxi), n_all)
267
+
268
+
269
+ class QuadgramCollocationFinder(AbstractCollocationFinder):
270
+ """A tool for the finding and ranking of quadgram collocations or other association measures.
271
+ It is often useful to use from_words() rather than constructing an instance directly.
272
+ """
273
+
274
+ default_ws = 4
275
+
276
+ def __init__(self, word_fd, quadgram_fd, ii, iii, ixi, ixxi, iixi, ixii):
277
+ """Construct a QuadgramCollocationFinder, given FreqDists for appearances of words,
278
+ bigrams, trigrams, two words with one word and two words between them, three words
279
+ with a word between them in both variations.
280
+ """
281
+ AbstractCollocationFinder.__init__(self, word_fd, quadgram_fd)
282
+ self.iii = iii
283
+ self.ii = ii
284
+ self.ixi = ixi
285
+ self.ixxi = ixxi
286
+ self.iixi = iixi
287
+ self.ixii = ixii
288
+
289
+ @classmethod
290
+ def from_words(cls, words, window_size=4):
291
+ if window_size < 4:
292
+ raise ValueError("Specify window_size at least 4")
293
+ ixxx = FreqDist()
294
+ iiii = FreqDist()
295
+ ii = FreqDist()
296
+ iii = FreqDist()
297
+ ixi = FreqDist()
298
+ ixxi = FreqDist()
299
+ iixi = FreqDist()
300
+ ixii = FreqDist()
301
+
302
+ for window in ngrams(words, window_size, pad_right=True):
303
+ w1 = window[0]
304
+ if w1 is None:
305
+ continue
306
+ for w2, w3, w4 in _itertools.combinations(window[1:], 3):
307
+ ixxx[w1] += 1
308
+ if w2 is None:
309
+ continue
310
+ ii[(w1, w2)] += 1
311
+ if w3 is None:
312
+ continue
313
+ iii[(w1, w2, w3)] += 1
314
+ ixi[(w1, w3)] += 1
315
+ if w4 is None:
316
+ continue
317
+ iiii[(w1, w2, w3, w4)] += 1
318
+ ixxi[(w1, w4)] += 1
319
+ ixii[(w1, w3, w4)] += 1
320
+ iixi[(w1, w2, w4)] += 1
321
+
322
+ return cls(ixxx, iiii, ii, iii, ixi, ixxi, iixi, ixii)
323
+
324
+ def score_ngram(self, score_fn, w1, w2, w3, w4):
325
+ n_all = self.N
326
+ n_iiii = self.ngram_fd[(w1, w2, w3, w4)]
327
+ if not n_iiii:
328
+ return
329
+ n_iiix = self.iii[(w1, w2, w3)]
330
+ n_xiii = self.iii[(w2, w3, w4)]
331
+ n_iixi = self.iixi[(w1, w2, w4)]
332
+ n_ixii = self.ixii[(w1, w3, w4)]
333
+
334
+ n_iixx = self.ii[(w1, w2)]
335
+ n_xxii = self.ii[(w3, w4)]
336
+ n_xiix = self.ii[(w2, w3)]
337
+ n_ixix = self.ixi[(w1, w3)]
338
+ n_ixxi = self.ixxi[(w1, w4)]
339
+ n_xixi = self.ixi[(w2, w4)]
340
+
341
+ n_ixxx = self.word_fd[w1]
342
+ n_xixx = self.word_fd[w2]
343
+ n_xxix = self.word_fd[w3]
344
+ n_xxxi = self.word_fd[w4]
345
+ return score_fn(
346
+ n_iiii,
347
+ (n_iiix, n_iixi, n_ixii, n_xiii),
348
+ (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix),
349
+ (n_ixxx, n_xixx, n_xxix, n_xxxi),
350
+ n_all,
351
+ )
352
+
353
+
354
+ def demo(scorer=None, compare_scorer=None):
355
+ """Finds bigram collocations in the files of the WebText corpus."""
356
+ from nltk.metrics import (
357
+ BigramAssocMeasures,
358
+ ranks_from_scores,
359
+ spearman_correlation,
360
+ )
361
+
362
+ if scorer is None:
363
+ scorer = BigramAssocMeasures.likelihood_ratio
364
+ if compare_scorer is None:
365
+ compare_scorer = BigramAssocMeasures.raw_freq
366
+
367
+ from nltk.corpus import stopwords, webtext
368
+
369
+ ignored_words = stopwords.words("english")
370
+ word_filter = lambda w: len(w) < 3 or w.lower() in ignored_words
371
+
372
+ for file in webtext.fileids():
373
+ words = [word.lower() for word in webtext.words(file)]
374
+
375
+ cf = BigramCollocationFinder.from_words(words)
376
+ cf.apply_freq_filter(3)
377
+ cf.apply_word_filter(word_filter)
378
+
379
+ corr = spearman_correlation(
380
+ ranks_from_scores(cf.score_ngrams(scorer)),
381
+ ranks_from_scores(cf.score_ngrams(compare_scorer)),
382
+ )
383
+ print(file)
384
+ print("\t", [" ".join(tup) for tup in cf.nbest(scorer, 15)])
385
+ print(f"\t Correlation to {compare_scorer.__name__}: {corr:0.4f}")
386
+
387
+
388
+ # Slows down loading too much
389
+ # bigram_measures = BigramAssocMeasures()
390
+ # trigram_measures = TrigramAssocMeasures()
391
+
392
+ if __name__ == "__main__":
393
+ import sys
394
+
395
+ from nltk.metrics import BigramAssocMeasures
396
+
397
+ try:
398
+ scorer = getattr(BigramAssocMeasures, sys.argv[1])
399
+ except IndexError:
400
+ scorer = None
401
+ try:
402
+ compare_scorer = getattr(BigramAssocMeasures, sys.argv[2])
403
+ except IndexError:
404
+ compare_scorer = None
405
+
406
+ demo(scorer, compare_scorer)
407
+
408
+ __all__ = [
409
+ "BigramCollocationFinder",
410
+ "TrigramCollocationFinder",
411
+ "QuadgramCollocationFinder",
412
+ ]
env-llmeval/lib/python3.10/site-packages/nltk/compat.py ADDED
@@ -0,0 +1,43 @@
1
+ # Natural Language Toolkit: Compatibility
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ #
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import os
9
+ from functools import wraps
10
+
11
+ # ======= Compatibility for datasets that care about Python versions ========
12
+
13
+ # The following datasets have a /PY3 subdirectory containing
14
+ # a full copy of the data which has been re-encoded or repickled.
15
+ DATA_UPDATES = [
16
+ ("chunkers", "maxent_ne_chunker"),
17
+ ("help", "tagsets"),
18
+ ("taggers", "maxent_treebank_pos_tagger"),
19
+ ("tokenizers", "punkt"),
20
+ ]
21
+
22
+ _PY3_DATA_UPDATES = [os.path.join(*path_list) for path_list in DATA_UPDATES]
23
+
24
+
25
+ def add_py3_data(path):
26
+ for item in _PY3_DATA_UPDATES:
27
+ if item in str(path) and "/PY3" not in str(path):
28
+ pos = path.index(item) + len(item)
29
+ if path[pos : pos + 4] == ".zip":
30
+ pos += 4
31
+ path = path[:pos] + "/PY3" + path[pos:]
32
+ break
33
+ return path
34
+
35
+
36
+ # for use in adding /PY3 to the second (filename) argument
37
+ # of the file pointers in data.py
38
+ def py3_data(init_func):
39
+ def _decorator(*args, **kwargs):
40
+ args = (args[0], add_py3_data(args[1])) + args[2:]
41
+ return init_func(*args, **kwargs)
42
+
43
+ return wraps(init_func)(_decorator)
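To make the ``/PY3`` rewrite above concrete, here is roughly what ``add_py3_data`` does to a few paths (the paths are illustrative; on Windows the joined prefixes in ``_PY3_DATA_UPDATES`` use backslashes instead):

    add_py3_data("corpora/abc/rural.txt")
    # -> 'corpora/abc/rural.txt'               (not in DATA_UPDATES: unchanged)
    add_py3_data("tokenizers/punkt/english.pickle")
    # -> 'tokenizers/punkt/PY3/english.pickle'
    add_py3_data("tokenizers/punkt.zip")
    # -> 'tokenizers/punkt.zip/PY3'            (the '/PY3' goes after the '.zip')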
env-llmeval/lib/python3.10/site-packages/nltk/data.py ADDED
@@ -0,0 +1,1441 @@
1
+ # Natural Language Toolkit: Utility functions
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Functions to find and load NLTK resource files, such as corpora,
10
+ grammars, and saved processing objects. Resource files are identified
11
+ using URLs, such as ``nltk:corpora/abc/rural.txt`` or
12
+ ``https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg``.
13
+ The following URL protocols are supported:
14
+
15
+ - ``file:path``: Specifies the file whose path is *path*.
16
+ Both relative and absolute paths may be used.
17
+
18
+ - ``https://host/path``: Specifies the file stored on the web
19
+ server *host* at path *path*.
20
+
21
+ - ``nltk:path``: Specifies the file stored in the NLTK data
22
+ package at *path*. NLTK will search for these files in the
23
+ directories specified by ``nltk.data.path``.
24
+
25
+ If no protocol is specified, then the default protocol ``nltk:`` will
26
+ be used.
27
+
28
+ This module provides two functions that can be used to access a
29
+ resource file, given its URL: ``load()`` loads a given resource, and
30
+ adds it to a resource cache; and ``retrieve()`` copies a given resource
31
+ to a local file.
32
+ """
33
+
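A small sketch of the two entry points named above (the resource name is a standard NLTK sample grammar, but treat it as illustrative):

    import nltk.data

    # load() resolves the URL, parses the resource, and caches it
    grammar = nltk.data.load("nltk:grammars/sample_grammars/toy.cfg")

    # retrieve() just copies the resource to a local file
    nltk.data.retrieve("nltk:grammars/sample_grammars/toy.cfg", "toy.cfg")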
34
+ import codecs
35
+ import functools
36
+ import os
37
+ import pickle
38
+ import re
39
+ import sys
40
+ import textwrap
41
+ import zipfile
42
+ from abc import ABCMeta, abstractmethod
43
+ from gzip import WRITE as GZ_WRITE
44
+ from gzip import GzipFile
45
+ from io import BytesIO, TextIOWrapper
46
+ from urllib.request import url2pathname, urlopen
47
+
48
+ try:
49
+ from zlib import Z_SYNC_FLUSH as FLUSH
50
+ except ImportError:
51
+ from zlib import Z_FINISH as FLUSH
52
+
53
+ from nltk import grammar, sem
54
+ from nltk.compat import add_py3_data, py3_data
55
+ from nltk.internals import deprecated
56
+
57
+ textwrap_indent = functools.partial(textwrap.indent, prefix=" ")
58
+
59
+ ######################################################################
60
+ # Search Path
61
+ ######################################################################
62
+
63
+ path = []
64
+ """A list of directories where the NLTK data package might reside.
65
+ These directories will be checked in order when looking for a
66
+ resource in the data package. Note that this allows users to
67
+ substitute in their own versions of resources, if they have them
68
+ (e.g., in their home directory under ~/nltk_data)."""
69
+
70
+ # User-specified locations:
71
+ _paths_from_env = os.environ.get("NLTK_DATA", "").split(os.pathsep)
72
+ path += [d for d in _paths_from_env if d]
73
+ if "APPENGINE_RUNTIME" not in os.environ and os.path.expanduser("~/") != "~/":
74
+ path.append(os.path.expanduser("~/nltk_data"))
75
+
76
+ if sys.platform.startswith("win"):
77
+ # Common locations on Windows:
78
+ path += [
79
+ os.path.join(sys.prefix, "nltk_data"),
80
+ os.path.join(sys.prefix, "share", "nltk_data"),
81
+ os.path.join(sys.prefix, "lib", "nltk_data"),
82
+ os.path.join(os.environ.get("APPDATA", "C:\\"), "nltk_data"),
83
+ r"C:\nltk_data",
84
+ r"D:\nltk_data",
85
+ r"E:\nltk_data",
86
+ ]
87
+ else:
88
+ # Common locations on UNIX & OS X:
89
+ path += [
90
+ os.path.join(sys.prefix, "nltk_data"),
91
+ os.path.join(sys.prefix, "share", "nltk_data"),
92
+ os.path.join(sys.prefix, "lib", "nltk_data"),
93
+ "/usr/share/nltk_data",
94
+ "/usr/local/share/nltk_data",
95
+ "/usr/lib/nltk_data",
96
+ "/usr/local/lib/nltk_data",
97
+ ]
98
+
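Since ``nltk.data.path`` is an ordinary list checked in order, a custom data directory can be preferred simply by prepending it (the directory here is hypothetical):

    import nltk.data
    nltk.data.path.insert(0, "/opt/my_nltk_data")  # searched before the defaults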
99
+
100
+ ######################################################################
101
+ # Util Functions
102
+ ######################################################################
103
+
104
+
105
+ def gzip_open_unicode(
106
+ filename,
107
+ mode="rb",
108
+ compresslevel=9,
109
+ encoding="utf-8",
110
+ fileobj=None,
111
+ errors=None,
112
+ newline=None,
113
+ ):
114
+ if fileobj is None:
115
+ fileobj = GzipFile(filename, mode, compresslevel, fileobj)
116
+ return TextIOWrapper(fileobj, encoding, errors, newline)
117
+
118
+
119
+ def split_resource_url(resource_url):
120
+ """
121
+ Splits a resource url into "<protocol>:<path>".
122
+
123
+ >>> windows = sys.platform.startswith('win')
124
+ >>> split_resource_url('nltk:home/nltk')
125
+ ('nltk', 'home/nltk')
126
+ >>> split_resource_url('nltk:/home/nltk')
127
+ ('nltk', '/home/nltk')
128
+ >>> split_resource_url('file:/home/nltk')
129
+ ('file', '/home/nltk')
130
+ >>> split_resource_url('file:///home/nltk')
131
+ ('file', '/home/nltk')
132
+ >>> split_resource_url('file:///C:/home/nltk')
133
+ ('file', '/C:/home/nltk')
134
+ """
135
+ protocol, path_ = resource_url.split(":", 1)
136
+ if protocol == "nltk":
137
+ pass
138
+ elif protocol == "file":
139
+ if path_.startswith("/"):
140
+ path_ = "/" + path_.lstrip("/")
141
+ else:
142
+ path_ = re.sub(r"^/{0,2}", "", path_)
143
+ return protocol, path_
144
+
145
+
146
+ def normalize_resource_url(resource_url):
147
+ r"""
148
+ Normalizes a resource url
149
+
150
+ >>> windows = sys.platform.startswith('win')
151
+ >>> os.path.normpath(split_resource_url(normalize_resource_url('file:grammar.fcfg'))[1]) == \
152
+ ... ('\\' if windows else '') + os.path.abspath(os.path.join(os.curdir, 'grammar.fcfg'))
153
+ True
154
+ >>> not windows or normalize_resource_url('file:C:/dir/file') == 'file:///C:/dir/file'
155
+ True
156
+ >>> not windows or normalize_resource_url('file:C:\\dir\\file') == 'file:///C:/dir/file'
157
+ True
158
+ >>> not windows or normalize_resource_url('file:C:\\dir/file') == 'file:///C:/dir/file'
159
+ True
160
+ >>> not windows or normalize_resource_url('file://C:/dir/file') == 'file:///C:/dir/file'
161
+ True
162
+ >>> not windows or normalize_resource_url('file:////C:/dir/file') == 'file:///C:/dir/file'
163
+ True
164
+ >>> not windows or normalize_resource_url('nltk:C:/dir/file') == 'file:///C:/dir/file'
165
+ True
166
+ >>> not windows or normalize_resource_url('nltk:C:\\dir\\file') == 'file:///C:/dir/file'
167
+ True
168
+ >>> windows or normalize_resource_url('file:/dir/file/toy.cfg') == 'file:///dir/file/toy.cfg'
169
+ True
170
+ >>> normalize_resource_url('nltk:home/nltk')
171
+ 'nltk:home/nltk'
172
+ >>> windows or normalize_resource_url('nltk:/home/nltk') == 'file:///home/nltk'
173
+ True
174
+ >>> normalize_resource_url('https://example.com/dir/file')
175
+ 'https://example.com/dir/file'
176
+ >>> normalize_resource_url('dir/file')
177
+ 'nltk:dir/file'
178
+ """
179
+ try:
180
+ protocol, name = split_resource_url(resource_url)
181
+ except ValueError:
182
+ # the resource url has no protocol, use the nltk protocol by default
183
+ protocol = "nltk"
184
+ name = resource_url
185
+ # use file protocol if the path is an absolute path
186
+ if protocol == "nltk" and os.path.isabs(name):
187
+ protocol = "file://"
188
+ name = normalize_resource_name(name, False, None)
189
+ elif protocol == "file":
190
+ protocol = "file://"
191
+ # name is absolute
192
+ name = normalize_resource_name(name, False, None)
193
+ elif protocol == "nltk":
194
+ protocol = "nltk:"
195
+ name = normalize_resource_name(name, True)
196
+ else:
197
+ # handled by urllib
198
+ protocol += "://"
199
+ return "".join([protocol, name])
200
+
201
+
202
+ def normalize_resource_name(resource_name, allow_relative=True, relative_path=None):
203
+ """
204
+ :type resource_name: str or unicode
205
+ :param resource_name: The name of the resource to search for.
206
+ Resource names are posix-style relative path names, such as
207
+ ``corpora/brown``. Directory names will automatically
208
+ be converted to a platform-appropriate path separator.
209
+ Directory trailing slashes are preserved.
210
+
211
+ >>> windows = sys.platform.startswith('win')
212
+ >>> normalize_resource_name('.', True)
213
+ './'
214
+ >>> normalize_resource_name('./', True)
215
+ './'
216
+ >>> windows or normalize_resource_name('dir/file', False, '/') == '/dir/file'
217
+ True
218
+ >>> not windows or normalize_resource_name('C:/file', False, '/') == '/C:/file'
219
+ True
220
+ >>> windows or normalize_resource_name('/dir/file', False, '/') == '/dir/file'
221
+ True
222
+ >>> windows or normalize_resource_name('../dir/file', False, '/') == '/dir/file'
223
+ True
224
+ >>> not windows or normalize_resource_name('/dir/file', True, '/') == 'dir/file'
225
+ True
226
+ >>> windows or normalize_resource_name('/dir/file', True, '/') == '/dir/file'
227
+ True
228
+ """
229
+ is_dir = bool(re.search(r"[\\/.]$", resource_name)) or resource_name.endswith(
230
+ os.path.sep
231
+ )
232
+ if sys.platform.startswith("win"):
233
+ resource_name = resource_name.lstrip("/")
234
+ else:
235
+ resource_name = re.sub(r"^/+", "/", resource_name)
236
+ if allow_relative:
237
+ resource_name = os.path.normpath(resource_name)
238
+ else:
239
+ if relative_path is None:
240
+ relative_path = os.curdir
241
+ resource_name = os.path.abspath(os.path.join(relative_path, resource_name))
242
+ resource_name = resource_name.replace("\\", "/").replace(os.path.sep, "/")
243
+ if sys.platform.startswith("win") and os.path.isabs(resource_name):
244
+ resource_name = "/" + resource_name
245
+ if is_dir and not resource_name.endswith("/"):
246
+ resource_name += "/"
247
+ return resource_name
248
+
249
+
250
+ ######################################################################
251
+ # Path Pointers
252
+ ######################################################################
253
+
254
+
255
+ class PathPointer(metaclass=ABCMeta):
256
+ """
257
+ An abstract base class for 'path pointers,' used by NLTK's data
258
+ package to identify specific paths. Two subclasses exist:
259
+ ``FileSystemPathPointer`` identifies a file that can be accessed
260
+ directly via a given absolute path. ``ZipFilePathPointer``
261
+ identifies a file contained within a zipfile, which can be accessed
262
+ by reading that zipfile.
263
+ """
264
+
265
+ @abstractmethod
266
+ def open(self, encoding=None):
267
+ """
268
+ Return a seekable read-only stream that can be used to read
269
+ the contents of the file identified by this path pointer.
270
+
271
+ :raise IOError: If the path specified by this pointer does
272
+ not contain a readable file.
273
+ """
274
+
275
+ @abstractmethod
276
+ def file_size(self):
277
+ """
278
+ Return the size of the file pointed to by this path pointer,
279
+ in bytes.
280
+
281
+ :raise IOError: If the path specified by this pointer does
282
+ not contain a readable file.
283
+ """
284
+
285
+ @abstractmethod
286
+ def join(self, fileid):
287
+ """
288
+ Return a new path pointer formed by starting at the path
289
+ identified by this pointer, and then following the relative
290
+ path given by ``fileid``. The path components of ``fileid``
291
+ should be separated by forward slashes, regardless of
292
+ the underlying file system's path separator character.
293
+ """
294
+
295
+
296
+ class FileSystemPathPointer(PathPointer, str):
297
+ """
298
+ A path pointer that identifies a file which can be accessed
299
+ directly via a given absolute path.
300
+ """
301
+
302
+ @py3_data
303
+ def __init__(self, _path):
304
+ """
305
+ Create a new path pointer for the given absolute path.
306
+
307
+ :raise IOError: If the given path does not exist.
308
+ """
309
+
310
+ _path = os.path.abspath(_path)
311
+ if not os.path.exists(_path):
312
+ raise OSError("No such file or directory: %r" % _path)
313
+ self._path = _path
314
+
315
+ # There's no need to call str.__init__(), since it's a no-op;
316
+ # str does all of its setup work in __new__.
317
+
318
+ @property
319
+ def path(self):
320
+ """The absolute path identified by this path pointer."""
321
+ return self._path
322
+
323
+ def open(self, encoding=None):
324
+ stream = open(self._path, "rb")
325
+ if encoding is not None:
326
+ stream = SeekableUnicodeStreamReader(stream, encoding)
327
+ return stream
328
+
329
+ def file_size(self):
330
+ return os.stat(self._path).st_size
331
+
332
+ def join(self, fileid):
333
+ _path = os.path.join(self._path, fileid)
334
+ return FileSystemPathPointer(_path)
335
+
336
+ def __repr__(self):
337
+ return "FileSystemPathPointer(%r)" % self._path
338
+
339
+ def __str__(self):
340
+ return self._path
341
+
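+ # A minimal usage sketch of the PathPointer API above (illustrative
+ # only; the paths are assumptions, and FileSystemPathPointer raises
+ # OSError for paths that do not exist):
+ #
+ # dir_ptr = FileSystemPathPointer("/tmp")           # an existing directory
+ # file_ptr = dir_ptr.join("example.txt")            # /tmp/example.txt (must exist)
+ # print(file_ptr.file_size())                       # size in bytes
+ # with file_ptr.open(encoding="utf-8") as stream:   # SeekableUnicodeStreamReader
+ #     text = stream.read()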
342
+
343
+ @deprecated("Use gzip.GzipFile instead as it also uses a buffer.")
344
+ class BufferedGzipFile(GzipFile):
345
+ """A ``GzipFile`` subclass for compatibility with older nltk releases.
346
+
347
+ Use ``GzipFile`` directly as it also buffers in all supported
348
+ Python versions.
349
+ """
350
+
351
+ @py3_data
352
+ def __init__(
353
+ self, filename=None, mode=None, compresslevel=9, fileobj=None, **kwargs
354
+ ):
355
+ """Return a buffered gzip file object."""
356
+ GzipFile.__init__(self, filename, mode, compresslevel, fileobj)
357
+
358
+ def write(self, data):
359
+ # This is identical to GzipFile.write but does not return
360
+ # the bytes written to retain compatibility.
361
+ super().write(data)
362
+
363
+
364
+ class GzipFileSystemPathPointer(FileSystemPathPointer):
365
+ """
366
+ A subclass of ``FileSystemPathPointer`` that identifies a gzip-compressed
367
+ file located at a given absolute path. ``GzipFileSystemPathPointer`` is
368
+ appropriate for loading large gzip-compressed pickle objects efficiently.
369
+ """
370
+
371
+ def open(self, encoding=None):
372
+ stream = GzipFile(self._path, "rb")
373
+ if encoding:
374
+ stream = SeekableUnicodeStreamReader(stream, encoding)
375
+ return stream
376
+
377
+
378
+ class ZipFilePathPointer(PathPointer):
379
+ """
380
+ A path pointer that identifies a file contained within a zipfile,
381
+ which can be accessed by reading that zipfile.
382
+ """
383
+
384
+ @py3_data
385
+ def __init__(self, zipfile, entry=""):
386
+ """
387
+ Create a new path pointer pointing at the specified entry
388
+ in the given zipfile.
389
+
390
+ :raise IOError: If the given zipfile does not exist, or if it
391
+ does not contain the specified entry.
392
+ """
393
+ if isinstance(zipfile, str):
394
+ zipfile = OpenOnDemandZipFile(os.path.abspath(zipfile))
395
+
396
+ # Check that the entry exists:
397
+ if entry:
398
+
399
+ # Normalize the entry string, it should be relative:
400
+ entry = normalize_resource_name(entry, True, "/").lstrip("/")
401
+
402
+ try:
403
+ zipfile.getinfo(entry)
404
+ except Exception as e:
405
+ # Sometimes directories aren't explicitly listed in
406
+ # the zip file. So if `entry` is a directory name,
407
+ # then check if the zipfile contains any files that
408
+ # are under the given directory.
409
+ if entry.endswith("/") and [
410
+ n for n in zipfile.namelist() if n.startswith(entry)
411
+ ]:
412
+ pass # zipfile contains a file in that directory.
413
+ else:
414
+ # Otherwise, complain.
415
+ raise OSError(
416
+ f"Zipfile {zipfile.filename!r} does not contain {entry!r}"
417
+ ) from e
418
+ self._zipfile = zipfile
419
+ self._entry = entry
420
+
421
+ @property
422
+ def zipfile(self):
423
+ """
424
+ The zipfile.ZipFile object used to access the zip file
425
+ containing the entry identified by this path pointer.
426
+ """
427
+ return self._zipfile
428
+
429
+ @property
430
+ def entry(self):
431
+ """
432
+ The name of the file within the zipfile that this path
433
+ pointer points to.
434
+ """
435
+ return self._entry
436
+
437
+ def open(self, encoding=None):
438
+ data = self._zipfile.read(self._entry)
439
+ stream = BytesIO(data)
440
+ if self._entry.endswith(".gz"):
441
+ stream = GzipFile(self._entry, fileobj=stream)
442
+ elif encoding is not None:
443
+ stream = SeekableUnicodeStreamReader(stream, encoding)
444
+ return stream
445
+
446
+ def file_size(self):
447
+ return self._zipfile.getinfo(self._entry).file_size
448
+
449
+ def join(self, fileid):
450
+ entry = f"{self._entry}/{fileid}"
451
+ return ZipFilePathPointer(self._zipfile, entry)
452
+
453
+ def __repr__(self):
454
+ return f"ZipFilePathPointer({self._zipfile.filename!r}, {self._entry!r})"
455
+
456
+ def __str__(self):
457
+ return os.path.normpath(os.path.join(self._zipfile.filename, self._entry))
458
+
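+ # A minimal sketch of ZipFilePathPointer usage (illustrative only; the
+ # zip path is an assumption, and the entry names reuse the chat80
+ # example that appears elsewhere in this module):
+ #
+ # zp = ZipFilePathPointer("/path/to/chat80.zip", "chat80/cities.pl")
+ # data = zp.open().read()                             # decompressed bytes
+ # dir_zp = ZipFilePathPointer(zp.zipfile, "chat80/")  # directory entry (trailing slash)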
459
+
460
+ ######################################################################
461
+ # Access Functions
462
+ ######################################################################
463
+
464
+ # Don't use a weak dictionary, because in the common case this
465
+ # causes a lot more reloading than is necessary.
466
+ _resource_cache = {}
467
+ """A dictionary used to cache resources so that they won't
468
+ need to be loaded more than once."""
469
+
470
+
471
+ def find(resource_name, paths=None):
472
+ """
473
+ Find the given resource by searching through the directories and
474
+ zip files in paths, where a None or empty string specifies an absolute path.
475
+ Returns a corresponding path name. If the given resource is not
476
+ found, raise a ``LookupError``, whose message gives a pointer to
477
+ the installation instructions for the NLTK downloader.
478
+
479
+ Zip File Handling:
480
+
481
+ - If ``resource_name`` contains a component with a ``.zip``
482
+ extension, then it is assumed to be a zipfile; and the
483
+ remaining path components are used to look inside the zipfile.
484
+
485
+ - If any element of ``nltk.data.path`` has a ``.zip`` extension,
486
+ then it is assumed to be a zipfile.
487
+
488
+ - If a given resource name that does not contain any zipfile
489
+ component is not found initially, then ``find()`` will make a
490
+ second attempt to find that resource, by replacing each
491
+ component *p* in the path with *p.zip/p*. For example, this
492
+ allows ``find()`` to map the resource name
493
+ ``corpora/chat80/cities.pl`` to a zip file path pointer to
494
+ ``corpora/chat80.zip/chat80/cities.pl``.
495
+
496
+ - When using ``find()`` to locate a directory contained in a
497
+ zipfile, the resource name must end with the forward slash
498
+ character. Otherwise, ``find()`` will not locate the
499
+ directory.
500
+
501
+ :type resource_name: str or unicode
502
+ :param resource_name: The name of the resource to search for.
503
+ Resource names are posix-style relative path names, such as
504
+ ``corpora/brown``. Directory names will be
505
+ automatically converted to a platform-appropriate path separator.
506
+ :rtype: str
507
+ """
508
+ resource_name = normalize_resource_name(resource_name, True)
509
+
510
+ # Resolve default paths at runtime in-case the user overrides
511
+ # nltk.data.path
512
+ if paths is None:
513
+ paths = path
514
+
515
+ # Check if the resource name includes a zipfile name
516
+ m = re.match(r"(.*\.zip)/?(.*)$|", resource_name)  # trailing "|": always matches
517
+ zipfile, zipentry = m.groups()
518
+
519
+ # Check each item in our path
520
+ for path_ in paths:
521
+ # Is the path item a zipfile?
522
+ if path_ and (os.path.isfile(path_) and path_.endswith(".zip")):
523
+ try:
524
+ return ZipFilePathPointer(path_, resource_name)
525
+ except OSError:
526
+ # resource not in zipfile
527
+ continue
528
+
529
+ # Is the path item a directory or is resource_name an absolute path?
530
+ elif not path_ or os.path.isdir(path_):
531
+ if zipfile is None:
532
+ p = os.path.join(path_, url2pathname(resource_name))
533
+ if os.path.exists(p):
534
+ if p.endswith(".gz"):
535
+ return GzipFileSystemPathPointer(p)
536
+ else:
537
+ return FileSystemPathPointer(p)
538
+ else:
539
+ p = os.path.join(path_, url2pathname(zipfile))
540
+ if os.path.exists(p):
541
+ try:
542
+ return ZipFilePathPointer(p, zipentry)
543
+ except OSError:
544
+ # resource not in zipfile
545
+ continue
546
+
547
+ # Fallback: if the path doesn't include a zip file, then try
548
+ # again, assuming that one of the path components is inside a
549
+ # zipfile of the same name.
550
+ if zipfile is None:
551
+ pieces = resource_name.split("/")
552
+ for i in range(len(pieces)):
553
+ modified_name = "/".join(pieces[:i] + [pieces[i] + ".zip"] + pieces[i:])
554
+ try:
555
+ return find(modified_name, paths)
556
+ except LookupError:
557
+ pass
558
+
559
+ # Identify the package (i.e. the .zip file) to download.
560
+ resource_zipname = resource_name.split("/")[1]
561
+ if resource_zipname.endswith(".zip"):
562
+ resource_zipname = resource_zipname.rpartition(".")[0]
563
+ # Display a friendly error message if the resource wasn't found:
564
+ msg = str(
565
+ "Resource \33[93m{resource}\033[0m not found.\n"
566
+ "Please use the NLTK Downloader to obtain the resource:\n\n"
567
+ "\33[31m" # To display red text in terminal.
568
+ ">>> import nltk\n"
569
+ ">>> nltk.download('{resource}')\n"
570
+ "\033[0m"
571
+ ).format(resource=resource_zipname)
572
+ msg = textwrap_indent(msg)
573
+
574
+ msg += "\n For more information see: https://www.nltk.org/data.html\n"
575
+
576
+ msg += "\n Attempted to load \33[93m{resource_name}\033[0m\n".format(
577
+ resource_name=resource_name
578
+ )
579
+
580
+ msg += "\n Searched in:" + "".join("\n - %r" % d for d in paths)
581
+ sep = "*" * 70
582
+ resource_not_found = f"\n{sep}\n{msg}\n{sep}\n"
583
+ raise LookupError(resource_not_found)
584
+
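+ # A sketch of find() in action (illustrative only), using the
+ # "corpora/chat80/cities.pl" example from the docstring above; it
+ # assumes the chat80 package was installed with the NLTK downloader:
+ #
+ # ptr = find("corpora/chat80/cities.pl")
+ # # chat80 ships zipped, so the fallback pass rewrites the name to
+ # # "corpora/chat80.zip/chat80/cities.pl" and returns a ZipFilePathPointer.
+ # print(ptr)   # e.g. .../nltk_data/corpora/chat80.zip/chat80/cities.pl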
585
+
586
+ def retrieve(resource_url, filename=None, verbose=True):
587
+ """
588
+ Copy the given resource to a local file. If no filename is
589
+ specified, then use the URL's filename. If there is already a
590
+ file named ``filename``, then raise a ``ValueError``.
591
+
592
+ :type resource_url: str
593
+ :param resource_url: A URL specifying where the resource should be
594
+ loaded from. The default protocol is "nltk:", which searches
595
+ for the file in the NLTK data package.
596
+ """
597
+ resource_url = normalize_resource_url(resource_url)
598
+ if filename is None:
599
+ if resource_url.startswith("file:"):
600
+ filename = os.path.split(resource_url)[-1]
601
+ else:
602
+ filename = re.sub(r"(^\w+:)?.*/", "", resource_url)
603
+ if os.path.exists(filename):
604
+ filename = os.path.abspath(filename)
605
+ raise ValueError("File %r already exists!" % filename)
606
+
607
+ if verbose:
608
+ print(f"Retrieving {resource_url!r}, saving to {filename!r}")
609
+
610
+ # Open the input & output streams.
611
+ infile = _open(resource_url)
612
+
613
+ # Copy infile -> outfile, using 64k blocks.
614
+ with open(filename, "wb") as outfile:
615
+ while True:
616
+ s = infile.read(1024 * 64) # 64k blocks.
617
+ outfile.write(s)
618
+ if not s:
619
+ break
620
+
621
+ infile.close()
622
+
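+ # A retrieve() usage sketch (illustrative only; the resource names are
+ # assumptions). Note that it raises ValueError if the target file
+ # already exists:
+ #
+ # retrieve("nltk:grammars/sample_grammars/toy.cfg")      # saves ./toy.cfg
+ # retrieve("https://example.com/dir/file", "file.txt")   # explicit filename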
623
+
624
+ #: A dictionary describing the formats that are supported by NLTK's
625
+ #: load() method. Keys are format names, and values are format
626
+ #: descriptions.
627
+ FORMATS = {
628
+ "pickle": "A serialized python object, stored using the pickle module.",
629
+ "json": "A serialized python object, stored using the json module.",
630
+ "yaml": "A serialized python object, stored using the yaml module.",
631
+ "cfg": "A context free grammar.",
632
+ "pcfg": "A probabilistic CFG.",
633
+ "fcfg": "A feature CFG.",
634
+ "fol": "A list of first order logic expressions, parsed with "
635
+ "nltk.sem.logic.Expression.fromstring.",
636
+ "logic": "A list of first order logic expressions, parsed with "
637
+ "nltk.sem.logic.LogicParser. Requires an additional logic_parser "
638
+ "parameter",
639
+ "val": "A semantic valuation, parsed by nltk.sem.Valuation.fromstring.",
640
+ "raw": "The raw (byte string) contents of a file.",
641
+ "text": "The raw (unicode string) contents of a file. ",
642
+ }
643
+
644
+ #: A dictionary mapping from file extensions to format names, used
645
+ #: by load() when format="auto" to decide the format for a
646
+ #: given resource url.
647
+ AUTO_FORMATS = {
648
+ "pickle": "pickle",
649
+ "json": "json",
650
+ "yaml": "yaml",
651
+ "cfg": "cfg",
652
+ "pcfg": "pcfg",
653
+ "fcfg": "fcfg",
654
+ "fol": "fol",
655
+ "logic": "logic",
656
+ "val": "val",
657
+ "txt": "text",
658
+ "text": "text",
659
+ }
660
+
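+ # A sketch of how load() resolves format="auto" from these tables
+ # (mirrors the logic in load() below; illustrative only):
+ #
+ # def guess_format(resource_url):
+ #     parts = resource_url.split(".")
+ #     ext = parts[-2] if parts[-1] == "gz" else parts[-1]  # strip ".gz"
+ #     return AUTO_FORMATS.get(ext)   # None makes load() raise ValueError
+ #
+ # guess_format("grammars/sample_grammars/toy.cfg")   # -> 'cfg'
+ # guess_format("taggers/foo/tagger.pickle.gz")       # -> 'pickle'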
661
+
662
+ def load(
663
+ resource_url,
664
+ format="auto",
665
+ cache=True,
666
+ verbose=False,
667
+ logic_parser=None,
668
+ fstruct_reader=None,
669
+ encoding=None,
670
+ ):
671
+ """
672
+ Load a given resource from the NLTK data package. The following
673
+ resource formats are currently supported:
674
+
675
+ - ``pickle``
676
+ - ``json``
677
+ - ``yaml``
678
+ - ``cfg`` (context free grammars)
679
+ - ``pcfg`` (probabilistic CFGs)
680
+ - ``fcfg`` (feature-based CFGs)
681
+ - ``fol`` (formulas of First Order Logic)
682
+ - ``logic`` (Logical formulas to be parsed by the given logic_parser)
683
+ - ``val`` (valuation of First Order Logic model)
684
+ - ``text`` (the file contents as a unicode string)
685
+ - ``raw`` (the raw file contents as a byte string)
686
+
687
+ If no format is specified, ``load()`` will attempt to determine a
688
+ format based on the resource name's file extension. If that
689
+ fails, ``load()`` will raise a ``ValueError`` exception.
690
+
691
+ For all text formats (everything except ``pickle``, ``json``, ``yaml`` and ``raw``),
692
+ it tries to decode the raw contents using UTF-8, and if that doesn't
693
+ work, it tries with ISO-8859-1 (Latin-1), unless the ``encoding``
694
+ is specified.
695
+
696
+ :type resource_url: str
697
+ :param resource_url: A URL specifying where the resource should be
698
+ loaded from. The default protocol is "nltk:", which searches
699
+ for the file in the NLTK data package.
700
+ :type cache: bool
701
+ :param cache: If true, add this resource to a cache. If load()
702
+ finds a resource in its cache, then it will return it from the
703
+ cache rather than loading it.
704
+ :type verbose: bool
705
+ :param verbose: If true, print a message when loading a resource.
706
+ Messages are not displayed when a resource is retrieved from
707
+ the cache.
708
+ :type logic_parser: LogicParser
709
+ :param logic_parser: The parser that will be used to parse logical
710
+ expressions.
711
+ :type fstruct_reader: FeatStructReader
712
+ :param fstruct_reader: The parser that will be used to parse the
713
+ feature structure of an fcfg.
714
+ :type encoding: str
715
+ :param encoding: the encoding of the input; only used for text formats.
716
+ """
717
+ resource_url = normalize_resource_url(resource_url)
718
+ resource_url = add_py3_data(resource_url)
719
+
720
+ # Determine the format of the resource.
721
+ if format == "auto":
722
+ resource_url_parts = resource_url.split(".")
723
+ ext = resource_url_parts[-1]
724
+ if ext == "gz":
725
+ ext = resource_url_parts[-2]
726
+ format = AUTO_FORMATS.get(ext)
727
+ if format is None:
728
+ raise ValueError(
729
+ "Could not determine format for %s based "
730
+ 'on its file\nextension; use the "format" '
731
+ "argument to specify the format explicitly." % resource_url
732
+ )
733
+
734
+ if format not in FORMATS:
735
+ raise ValueError(f"Unknown format type: {format}!")
736
+
737
+ # If we've cached the resource, then just return it.
738
+ if cache:
739
+ resource_val = _resource_cache.get((resource_url, format))
740
+ if resource_val is not None:
741
+ if verbose:
742
+ print(f"<<Using cached copy of {resource_url}>>")
743
+ return resource_val
744
+
745
+ # Let the user know what's going on.
746
+ if verbose:
747
+ print(f"<<Loading {resource_url}>>")
748
+
749
+ # Load the resource.
750
+ opened_resource = _open(resource_url)
751
+
752
+ if format == "raw":
753
+ resource_val = opened_resource.read()
754
+ elif format == "pickle":
755
+ resource_val = pickle.load(opened_resource)
756
+ elif format == "json":
757
+ import json
758
+
759
+ from nltk.jsontags import json_tags
760
+
761
+ resource_val = json.load(opened_resource)
762
+ tag = None
763
+ if len(resource_val) == 1:  # a tagged resource is a single-key dict
764
+ tag = next(iter(resource_val.keys()))  # dict views are not iterators
765
+ if tag not in json_tags:
766
+ raise ValueError("Unknown json tag.")
767
+ elif format == "yaml":
768
+ import yaml
769
+
770
+ resource_val = yaml.safe_load(opened_resource)
771
+ else:
772
+ # The resource is a text format.
773
+ binary_data = opened_resource.read()
774
+ if encoding is not None:
775
+ string_data = binary_data.decode(encoding)
776
+ else:
777
+ try:
778
+ string_data = binary_data.decode("utf-8")
779
+ except UnicodeDecodeError:
780
+ string_data = binary_data.decode("latin-1")
781
+ if format == "text":
782
+ resource_val = string_data
783
+ elif format == "cfg":
784
+ resource_val = grammar.CFG.fromstring(string_data, encoding=encoding)
785
+ elif format == "pcfg":
786
+ resource_val = grammar.PCFG.fromstring(string_data, encoding=encoding)
787
+ elif format == "fcfg":
788
+ resource_val = grammar.FeatureGrammar.fromstring(
789
+ string_data,
790
+ logic_parser=logic_parser,
791
+ fstruct_reader=fstruct_reader,
792
+ encoding=encoding,
793
+ )
794
+ elif format == "fol":
795
+ resource_val = sem.read_logic(
796
+ string_data,
797
+ logic_parser=sem.logic.LogicParser(),
798
+ encoding=encoding,
799
+ )
800
+ elif format == "logic":
801
+ resource_val = sem.read_logic(
802
+ string_data, logic_parser=logic_parser, encoding=encoding
803
+ )
804
+ elif format == "val":
805
+ resource_val = sem.read_valuation(string_data, encoding=encoding)
806
+ else:
807
+ raise AssertionError(
808
+ "Internal NLTK error: Format %s isn't "
809
+ "handled by nltk.data.load()" % (format,)
810
+ )
811
+
812
+ opened_resource.close()
813
+
814
+ # If requested, add it to the cache.
815
+ if cache:
816
+ try:
817
+ _resource_cache[(resource_url, format)] = resource_val
818
+ # TODO: add this line
819
+ # print('<<Caching a copy of %s>>' % (resource_url,))
820
+ except TypeError:
821
+ # We can't create weak references to some object types, like
822
+ # strings and tuples. For now, just don't cache them.
823
+ pass
824
+
825
+ return resource_val
826
+
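+ # Typical load() calls (illustrative only; the resource names are
+ # assumptions and require the corresponding packages to be installed):
+ #
+ # cfg = load("grammars/sample_grammars/toy.cfg")     # format inferred: cfg
+ # txt = load("nltk:corpora/abc/rural.txt", format="text", encoding="utf-8")
+ # load("nltk:corpora/abc/rural.txt", format="text")  # served from _resource_cache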
827
+
828
+ def show_cfg(resource_url, escape="##"):
829
+ """
830
+ Write out a grammar file, ignoring escaped and empty lines.
831
+
832
+ :type resource_url: str
833
+ :param resource_url: A URL specifying where the resource should be
834
+ loaded from. The default protocol is "nltk:", which searches
835
+ for the file in the NLTK data package.
836
+ :type escape: str
837
+ :param escape: Prepended string that signals lines to be ignored
838
+ """
839
+ resource_url = normalize_resource_url(resource_url)
840
+ resource_val = load(resource_url, format="text", cache=False)
841
+ lines = resource_val.splitlines()
842
+ for l in lines:
843
+ if l.startswith(escape):
844
+ continue
845
+ if re.match("^$", l):
846
+ continue
847
+ print(l)
848
+
849
+
850
+ def clear_cache():
851
+ """
852
+ Remove all objects from the resource cache.
853
+ :see: load()
854
+ """
855
+ _resource_cache.clear()
856
+
857
+
858
+ def _open(resource_url):
859
+ """
860
+ Helper function that returns an open file object for a resource,
861
+ given its resource URL. If the given resource URL uses the "nltk:"
862
+ protocol, or uses no protocol, then use ``nltk.data.find`` to find
863
+ its path, and open it with the given mode; if the resource URL
864
+ uses the 'file' protocol, then open the file with the given mode;
865
+ otherwise, delegate to ``urllib2.urlopen``.
866
+
867
+ :type resource_url: str
868
+ :param resource_url: A URL specifying where the resource should be
869
+ loaded from. The default protocol is "nltk:", which searches
870
+ for the file in the NLTK data package.
871
+ """
872
+ resource_url = normalize_resource_url(resource_url)
873
+ protocol, path_ = split_resource_url(resource_url)
874
+
875
+ if protocol is None or protocol.lower() == "nltk":
876
+ return find(path_, path + [""]).open()
877
+ elif protocol.lower() == "file":
878
+ # urllib might not use mode='rb', so handle this one ourselves:
879
+ return find(path_, [""]).open()
880
+ else:
881
+ return urlopen(resource_url)
882
+
883
+
884
+ ######################################################################
885
+ # Lazy Resource Loader
886
+ ######################################################################
887
+
888
+
889
+ class LazyLoader:
890
+ @py3_data
891
+ def __init__(self, _path):
892
+ self._path = _path
893
+
894
+ def __load(self):
895
+ resource = load(self._path)
896
+ # This is where the magic happens! Transform ourselves into
897
+ # the object by modifying our own __dict__ and __class__ to
898
+ # match that of `resource`.
899
+ self.__dict__ = resource.__dict__
900
+ self.__class__ = resource.__class__
901
+
902
+ def __getattr__(self, attr):
903
+ self.__load()
904
+ # This looks circular, but it's not, since __load() changes our
905
+ # __class__ to something new:
906
+ return getattr(self, attr)
907
+
908
+ def __repr__(self):
909
+ self.__load()
910
+ # This looks circular, but it's not, since __load() changes our
911
+ # __class__ to something new:
912
+ return repr(self)
913
+
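+ # A LazyLoader usage sketch (illustrative only; the pickle path is an
+ # assumption). Nothing is loaded until an attribute is touched, at which
+ # point the instance rewrites itself into the loaded object:
+ #
+ # tagger = LazyLoader("taggers/my_tagger.pickle")   # no I/O yet
+ # tagger.tag(["Hello"])   # first access triggers load() + self-transform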
914
+
915
+ ######################################################################
916
+ # Open-On-Demand ZipFile
917
+ ######################################################################
918
+
919
+
920
+ class OpenOnDemandZipFile(zipfile.ZipFile):
921
+ """
922
+ A subclass of ``zipfile.ZipFile`` that closes its file pointer
923
+ whenever it is not using it; and re-opens it when it needs to read
924
+ data from the zipfile. This is useful for reducing the number of
925
+ open file handles when many zip files are being accessed at once.
926
+ ``OpenOnDemandZipFile`` must be constructed from a filename, not a
927
+ file-like object (to allow re-opening). ``OpenOnDemandZipFile`` is
928
+ read-only (i.e. ``write()`` and ``writestr()`` are disabled).
929
+ """
930
+
931
+ @py3_data
932
+ def __init__(self, filename):
933
+ if not isinstance(filename, str):
934
+ raise TypeError("ReopenableZipFile filename must be a string")
935
+ zipfile.ZipFile.__init__(self, filename)
936
+ assert self.filename == filename
937
+ self.close()
938
+ # After closing a ZipFile object, the _fileRefCnt needs to be cleared
939
+ # for Python 2 and 3 compatible code.
940
+ self._fileRefCnt = 0
941
+
942
+ def read(self, name):
943
+ assert self.fp is None
944
+ self.fp = open(self.filename, "rb")
945
+ value = zipfile.ZipFile.read(self, name)
946
+ # Ensure that _fileRefCnt is set, for Python 2 and 3 compatible code.
947
+ # Since we only opened one file here, we add 1.
948
+ self._fileRefCnt += 1
949
+ self.close()
950
+ return value
951
+
952
+ def write(self, *args, **kwargs):
953
+ """:raise NotImplementedError: OpenOnDemandZipfile is read-only"""
954
+ raise NotImplementedError("OpenOnDemandZipfile is read-only")
955
+
956
+ def writestr(self, *args, **kwargs):
957
+ """:raise NotImplementedError: OpenOnDemandZipfile is read-only"""
958
+ raise NotImplementedError("OpenOnDemandZipfile is read-only")
959
+
960
+ def __repr__(self):
961
+ return "OpenOnDemandZipFile(%r)" % self.filename
962
+
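+ # An illustrative sketch only (the zip path is an assumption).
+ # OpenOnDemandZipFile holds no file handle between reads, so many
+ # zip-backed path pointers can coexist without exhausting descriptors:
+ #
+ # zf = OpenOnDemandZipFile("/path/to/chat80.zip")
+ # zf.fp is None                        # True: closed right after __init__
+ # data = zf.read("chat80/cities.pl")   # opens, reads, closes again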
963
+
964
+ ######################################################################
965
+ # Seekable Unicode Stream Reader
966
+ ######################################################################
967
+
968
+
969
+ class SeekableUnicodeStreamReader:
970
+ """
971
+ A stream reader that automatically encodes the source byte stream
972
+ into unicode (like ``codecs.StreamReader``); but still supports the
973
+ ``seek()`` and ``tell()`` operations correctly. This is in contrast
974
+ to ``codecs.StreamReader``, which provides *broken* ``seek()`` and
975
+ ``tell()`` methods.
976
+
977
+ This class was motivated by ``StreamBackedCorpusView``, which
978
+ makes extensive use of ``seek()`` and ``tell()``, and needs to be
979
+ able to handle unicode-encoded files.
980
+
981
+ Note: this class requires stateless decoders. To my knowledge,
982
+ this shouldn't cause a problem with any of python's builtin
983
+ unicode encodings.
984
+ """
985
+
986
+ DEBUG = True  #: If true, then perform extra sanity checks.
987
+
988
+ @py3_data
989
+ def __init__(self, stream, encoding, errors="strict"):
990
+ # Rewind the stream to its beginning.
991
+ stream.seek(0)
992
+
993
+ self.stream = stream
994
+ """The underlying stream."""
995
+
996
+ self.encoding = encoding
997
+ """The name of the encoding that should be used to encode the
998
+ underlying stream."""
999
+
1000
+ self.errors = errors
1001
+ """The error mode that should be used when decoding data from
1002
+ the underlying stream. Can be 'strict', 'ignore', or
1003
+ 'replace'."""
1004
+
1005
+ self.decode = codecs.getdecoder(encoding)
1006
+ """The function that is used to decode byte strings into
1007
+ unicode strings."""
1008
+
1009
+ self.bytebuffer = b""
1010
+ """A buffer holding bytes that have been read but have not yet
1011
+ been decoded. This is only used when the final bytes from
1012
+ a read do not form a complete encoding for a character."""
1013
+
1014
+ self.linebuffer = None
1015
+ """A buffer used by ``readline()`` to hold characters that have
1016
+ been read, but have not yet been returned by ``read()`` or
1017
+ ``readline()``. This buffer consists of a list of unicode
1018
+ strings, where each string corresponds to a single line.
1019
+ The final element of the list may or may not be a complete
1020
+ line. Note that the existence of a linebuffer makes the
1021
+ ``tell()`` operation more complex, because it must backtrack
1022
+ to the beginning of the buffer to determine the correct
1023
+ file position in the underlying byte stream."""
1024
+
1025
+ self._rewind_checkpoint = 0
1026
+ """The file position at which the most recent read on the
1027
+ underlying stream began. This is used, together with
1028
+ ``_rewind_numchars``, to backtrack to the beginning of
1029
+ ``linebuffer`` (which is required by ``tell()``)."""
1030
+
1031
+ self._rewind_numchars = None
1032
+ """The number of characters that have been returned since the
1033
+ read that started at ``_rewind_checkpoint``. This is used,
1034
+ together with ``_rewind_checkpoint``, to backtrack to the
1035
+ beginning of ``linebuffer`` (which is required by ``tell()``)."""
1036
+
1037
+ self._bom = self._check_bom()
1038
+ """The length of the byte order marker at the beginning of
1039
+ the stream (or None for no byte order marker)."""
1040
+
1041
+ # /////////////////////////////////////////////////////////////////
1042
+ # Read methods
1043
+ # /////////////////////////////////////////////////////////////////
1044
+
1045
+ def read(self, size=None):
1046
+ """
1047
+ Read up to ``size`` bytes, decode them using this reader's
1048
+ encoding, and return the resulting unicode string.
1049
+
1050
+ :param size: The maximum number of bytes to read. If not
1051
+ specified, then read as many bytes as possible.
1052
+ :type size: int
1053
+ :rtype: unicode
1054
+ """
1055
+ chars = self._read(size)
1056
+
1057
+ # If linebuffer is not empty, then include it in the result
1058
+ if self.linebuffer:
1059
+ chars = "".join(self.linebuffer) + chars
1060
+ self.linebuffer = None
1061
+ self._rewind_numchars = None
1062
+
1063
+ return chars
1064
+
1065
+ def discard_line(self):
1066
+ if self.linebuffer and len(self.linebuffer) > 1:
1067
+ line = self.linebuffer.pop(0)
1068
+ self._rewind_numchars += len(line)
1069
+ else:
1070
+ self.stream.readline()
1071
+
1072
+ def readline(self, size=None):
1073
+ """
1074
+ Read a line of text, decode it using this reader's encoding,
1075
+ and return the resulting unicode string.
1076
+
1077
+ :param size: The maximum number of bytes to read. If no
1078
+ newline is encountered before ``size`` bytes have been read,
1079
+ then the returned value may not be a complete line of text.
1080
+ :type size: int
1081
+ """
1082
+ # If we have a non-empty linebuffer, then return the first
1083
+ # line from it. (Note that the last element of linebuffer may
1084
+ # not be a complete line; so let _read() deal with it.)
1085
+ if self.linebuffer and len(self.linebuffer) > 1:
1086
+ line = self.linebuffer.pop(0)
1087
+ self._rewind_numchars += len(line)
1088
+ return line
1089
+
1090
+ readsize = size or 72
1091
+ chars = ""
1092
+
1093
+ # If there's a remaining incomplete line in the buffer, add it.
1094
+ if self.linebuffer:
1095
+ chars += self.linebuffer.pop()
1096
+ self.linebuffer = None
1097
+
1098
+ while True:
1099
+ startpos = self.stream.tell() - len(self.bytebuffer)
1100
+ new_chars = self._read(readsize)
1101
+
1102
+ # If we're at a '\r', then read one extra character, since
1103
+ # it might be a '\n', to get the proper line ending.
1104
+ if new_chars and new_chars.endswith("\r"):
1105
+ new_chars += self._read(1)
1106
+
1107
+ chars += new_chars
1108
+ lines = chars.splitlines(True)
1109
+ if len(lines) > 1:
1110
+ line = lines[0]
1111
+ self.linebuffer = lines[1:]
1112
+ self._rewind_numchars = len(new_chars) - (len(chars) - len(line))
1113
+ self._rewind_checkpoint = startpos
1114
+ break
1115
+ elif len(lines) == 1:
1116
+ line0withend = lines[0]
1117
+ line0withoutend = lines[0].splitlines(False)[0]
1118
+ if line0withend != line0withoutend: # complete line
1119
+ line = line0withend
1120
+ break
1121
+
1122
+ if not new_chars or size is not None:
1123
+ line = chars
1124
+ break
1125
+
1126
+ # Read successively larger blocks of text.
1127
+ if readsize < 8000:
1128
+ readsize *= 2
1129
+
1130
+ return line
1131
+
1132
+ def readlines(self, sizehint=None, keepends=True):
1133
+ """
1134
+ Read this file's contents, decode them using this reader's
1135
+ encoding, and return it as a list of unicode lines.
1136
+
1137
+ :rtype: list(unicode)
1138
+ :param sizehint: Ignored.
1139
+ :param keepends: If false, then strip newlines.
1140
+ """
1141
+ return self.read().splitlines(keepends)
1142
+
1143
+ def next(self):
1144
+ """Return the next decoded line from the underlying stream."""
1145
+ line = self.readline()
1146
+ if line:
1147
+ return line
1148
+ else:
1149
+ raise StopIteration
1150
+
1151
+ def __next__(self):
1152
+ return self.next()
1153
+
1154
+ def __iter__(self):
1155
+ """Return self"""
1156
+ return self
1157
+
1158
+ def __del__(self):
1159
+ # let garbage collector deal with still opened streams
1160
+ if not self.closed:
1161
+ self.close()
1162
+
1163
+ def __enter__(self):
1164
+ return self
1165
+
1166
+ def __exit__(self, type, value, traceback):
1167
+ self.close()
1168
+
1169
+ def xreadlines(self):
1170
+ """Return self"""
1171
+ return self
1172
+
1173
+ # /////////////////////////////////////////////////////////////////
1174
+ # Pass-through methods & properties
1175
+ # /////////////////////////////////////////////////////////////////
1176
+
1177
+ @property
1178
+ def closed(self):
1179
+ """True if the underlying stream is closed."""
1180
+ return self.stream.closed
1181
+
1182
+ @property
1183
+ def name(self):
1184
+ """The name of the underlying stream."""
1185
+ return self.stream.name
1186
+
1187
+ @property
1188
+ def mode(self):
1189
+ """The mode of the underlying stream."""
1190
+ return self.stream.mode
1191
+
1192
+ def close(self):
1193
+ """
1194
+ Close the underlying stream.
1195
+ """
1196
+ self.stream.close()
1197
+
1198
+ # /////////////////////////////////////////////////////////////////
1199
+ # Seek and tell
1200
+ # /////////////////////////////////////////////////////////////////
1201
+
1202
+ def seek(self, offset, whence=0):
1203
+ """
1204
+ Move the stream to a new file position. If the reader is
1205
+ maintaining any buffers, then they will be cleared.
1206
+
1207
+ :param offset: A byte count offset.
1208
+ :param whence: If 0, then the offset is from the start of the file
1209
+ (offset should be positive), if 1, then the offset is from the
1210
+ current position (offset may be positive or negative); and if 2,
1211
+ then the offset is from the end of the file (offset should
1212
+ typically be negative).
1213
+ """
1214
+ if whence == 1:
1215
+ raise ValueError(
1216
+ "Relative seek is not supported for "
1217
+ "SeekableUnicodeStreamReader -- consider "
1218
+ "using char_seek_forward() instead."
1219
+ )
1220
+ self.stream.seek(offset, whence)
1221
+ self.linebuffer = None
1222
+ self.bytebuffer = b""
1223
+ self._rewind_numchars = None
1224
+ self._rewind_checkpoint = self.stream.tell()
1225
+
1226
+ def char_seek_forward(self, offset):
1227
+ """
1228
+ Move the read pointer forward by ``offset`` characters.
1229
+ """
1230
+ if offset < 0:
1231
+ raise ValueError("Negative offsets are not supported")
1232
+ # Clear all buffers.
1233
+ self.seek(self.tell())
1234
+ # Perform the seek operation.
1235
+ self._char_seek_forward(offset)
1236
+
1237
+ def _char_seek_forward(self, offset, est_bytes=None):
1238
+ """
1239
+ Move the file position forward by ``offset`` characters,
1240
+ ignoring all buffers.
1241
+
1242
+ :param est_bytes: A hint, giving an estimate of the number of
1243
+ bytes that will be needed to move forward by ``offset`` chars.
1244
+ Defaults to ``offset``.
1245
+ """
1246
+ if est_bytes is None:
1247
+ est_bytes = offset
1248
+ bytes = b""
1249
+
1250
+ while True:
1251
+ # Read in a block of bytes.
1252
+ newbytes = self.stream.read(est_bytes - len(bytes))
1253
+ bytes += newbytes
1254
+
1255
+ # Decode the bytes to characters.
1256
+ chars, bytes_decoded = self._incr_decode(bytes)
1257
+
1258
+ # If we got the right number of characters, then seek
1259
+ # backwards over any truncated characters, and return.
1260
+ if len(chars) == offset:
1261
+ self.stream.seek(-len(bytes) + bytes_decoded, 1)
1262
+ return
1263
+
1264
+ # If we went too far, then we can back-up until we get it
1265
+ # right, using the bytes we've already read.
1266
+ if len(chars) > offset:
1267
+ while len(chars) > offset:
1268
+ # Assume at least one byte/char.
1269
+ est_bytes += offset - len(chars)
1270
+ chars, bytes_decoded = self._incr_decode(bytes[:est_bytes])
1271
+ self.stream.seek(-len(bytes) + bytes_decoded, 1)
1272
+ return
1273
+
1274
+ # Otherwise, we haven't read enough bytes yet; loop again.
1275
+ est_bytes += offset - len(chars)
1276
+
1277
+ def tell(self):
1278
+ """
1279
+ Return the current file position on the underlying byte
1280
+ stream. If this reader is maintaining any buffers, then the
1281
+ returned file position will be the position of the beginning
1282
+ of those buffers.
1283
+ """
1284
+ # If nothing's buffered, then just return our current filepos:
1285
+ if self.linebuffer is None:
1286
+ return self.stream.tell() - len(self.bytebuffer)
1287
+
1288
+ # Otherwise, we'll need to backtrack the filepos until we
1289
+ # reach the beginning of the buffer.
1290
+
1291
+ # Store our original file position, so we can return here.
1292
+ orig_filepos = self.stream.tell()
1293
+
1294
+ # Calculate an estimate of where we think the newline is.
1295
+ bytes_read = (orig_filepos - len(self.bytebuffer)) - self._rewind_checkpoint
1296
+ buf_size = sum(len(line) for line in self.linebuffer)
1297
+ est_bytes = int(
1298
+ bytes_read * self._rewind_numchars / (self._rewind_numchars + buf_size)
1299
+ )
1300
+
1301
+ self.stream.seek(self._rewind_checkpoint)
1302
+ self._char_seek_forward(self._rewind_numchars, est_bytes)
1303
+ filepos = self.stream.tell()
1304
+
1305
+ # Sanity check
1306
+ if self.DEBUG:
1307
+ self.stream.seek(filepos)
1308
+ check1 = self._incr_decode(self.stream.read(50))[0]
1309
+ check2 = "".join(self.linebuffer)
1310
+ assert check1.startswith(check2) or check2.startswith(check1)
1311
+
1312
+ # Return to our original filepos (so we don't have to throw
1313
+ # out our buffer.)
1314
+ self.stream.seek(orig_filepos)
1315
+
1316
+ # Return the calculated filepos
1317
+ return filepos
1318
+
1319
+ # /////////////////////////////////////////////////////////////////
1320
+ # Helper methods
1321
+ # /////////////////////////////////////////////////////////////////
1322
+
1323
+ def _read(self, size=None):
1324
+ """
1325
+ Read up to ``size`` bytes from the underlying stream, decode
1326
+ them using this reader's encoding, and return the resulting
1327
+ unicode string. ``linebuffer`` is not included in the result.
1328
+ """
1329
+ if size == 0:
1330
+ return ""
1331
+
1332
+ # Skip past the byte order marker, if present.
1333
+ if self._bom and self.stream.tell() == 0:
1334
+ self.stream.read(self._bom)
1335
+
1336
+ # Read the requested number of bytes.
1337
+ if size is None:
1338
+ new_bytes = self.stream.read()
1339
+ else:
1340
+ new_bytes = self.stream.read(size)
1341
+ bytes = self.bytebuffer + new_bytes
1342
+
1343
+ # Decode the bytes into unicode characters
1344
+ chars, bytes_decoded = self._incr_decode(bytes)
1345
+
1346
+ # If we got bytes but couldn't decode any, then read further.
1347
+ if (size is not None) and (not chars) and (len(new_bytes) > 0):
1348
+ while not chars:
1349
+ new_bytes = self.stream.read(1)
1350
+ if not new_bytes:
1351
+ break # end of file.
1352
+ bytes += new_bytes
1353
+ chars, bytes_decoded = self._incr_decode(bytes)
1354
+
1355
+ # Record any bytes we didn't consume.
1356
+ self.bytebuffer = bytes[bytes_decoded:]
1357
+
1358
+ # Return the result
1359
+ return chars
1360
+
1361
+ def _incr_decode(self, bytes):
1362
+ """
1363
+ Decode the given byte string into a unicode string, using this
1364
+ reader's encoding. If an exception is encountered that
1365
+ appears to be caused by a truncation error, then just decode
1366
+ the byte string without the bytes that cause the truncation
1367
+ error.
1368
+
1369
+ Return a tuple ``(chars, num_consumed)``, where ``chars`` is
1370
+ the decoded unicode string, and ``num_consumed`` is the
1371
+ number of bytes that were consumed.
1372
+ """
1373
+ while True:
1374
+ try:
1375
+ return self.decode(bytes, "strict")
1376
+ except UnicodeDecodeError as exc:
1377
+ # If the exception occurs at the end of the string,
1378
+ # then assume that it's a truncation error.
1379
+ if exc.end == len(bytes):
1380
+ return self.decode(bytes[: exc.start], self.errors)
1381
+
1382
+ # Otherwise, if we're being strict, then raise it.
1383
+ elif self.errors == "strict":
1384
+ raise
1385
+
1386
+ # If we're not strict, then re-process it with our
1387
+ # errors setting. This *may* raise an exception.
1388
+ else:
1389
+ return self.decode(bytes, self.errors)
1390
+
1391
+ _BOM_TABLE = {
1392
+ "utf8": [(codecs.BOM_UTF8, None)],
1393
+ "utf16": [(codecs.BOM_UTF16_LE, "utf16-le"), (codecs.BOM_UTF16_BE, "utf16-be")],
1394
+ "utf16le": [(codecs.BOM_UTF16_LE, None)],
1395
+ "utf16be": [(codecs.BOM_UTF16_BE, None)],
1396
+ "utf32": [(codecs.BOM_UTF32_LE, "utf32-le"), (codecs.BOM_UTF32_BE, "utf32-be")],
1397
+ "utf32le": [(codecs.BOM_UTF32_LE, None)],
1398
+ "utf32be": [(codecs.BOM_UTF32_BE, None)],
1399
+ }
1400
+
1401
+ def _check_bom(self):
1402
+ # Normalize our encoding name
1403
+ enc = re.sub("[ -]", "", self.encoding.lower())
1404
+
1405
+ # Look up our encoding in the BOM table.
1406
+ bom_info = self._BOM_TABLE.get(enc)
1407
+
1408
+ if bom_info:
1409
+ # Read a prefix, to check against the BOM(s)
1410
+ bytes = self.stream.read(16)
1411
+ self.stream.seek(0)
1412
+
1413
+ # Check for each possible BOM.
1414
+ for (bom, new_encoding) in bom_info:
1415
+ if bytes.startswith(bom):
1416
+ if new_encoding:
1417
+ self.encoding = new_encoding
1418
+ return len(bom)
1419
+
1420
+ return None
1421
+
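+ # A self-contained sketch of the seek()/tell() round-trip that motivates
+ # this class (illustrative only, using an in-memory stream):
+ #
+ # from io import BytesIO
+ # reader = SeekableUnicodeStreamReader(BytesIO(b"a\nbb\nccc\n"), "utf8")
+ # pos = reader.tell()        # byte offset, correct even with buffered lines
+ # line = reader.readline()   # 'a\n'
+ # reader.seek(pos)           # rewind by byte offset
+ # assert reader.readline() == line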
1422
+
1423
+ __all__ = [
1424
+ "path",
1425
+ "PathPointer",
1426
+ "FileSystemPathPointer",
1427
+ "BufferedGzipFile",
1428
+ "GzipFileSystemPathPointer",
1430
+ "find",
1431
+ "retrieve",
1432
+ "FORMATS",
1433
+ "AUTO_FORMATS",
1434
+ "load",
1435
+ "show_cfg",
1436
+ "clear_cache",
1437
+ "LazyLoader",
1438
+ "OpenOnDemandZipFile",
1440
+ "SeekableUnicodeStreamReader",
1441
+ ]
env-llmeval/lib/python3.10/site-packages/nltk/decorators.py ADDED
@@ -0,0 +1,251 @@
1
+ """
2
+ Decorator module by Michele Simionato <[email protected]>
3
+ Copyright Michele Simionato, distributed under the terms of the BSD License (see below).
4
+ http://www.phyast.pitt.edu/~micheles/python/documentation.html
5
+
6
+ Included in NLTK for its support of a nice memoization decorator.
7
+ """
8
+
9
+ __docformat__ = "restructuredtext en"
10
+
11
+ ## The basic trick is to generate the source code for the decorated function
12
+ ## with the right signature and to evaluate it.
13
+ ## Uncomment the statement 'print >> sys.stderr, func_src' in _decorator
14
+ ## to understand what is going on.
15
+
16
+ __all__ = ["decorator", "new_wrapper", "getinfo"]
17
+
18
+ import sys
19
+
20
+ # Hack to keep NLTK's "tokenize" module from colliding with the "tokenize" in
21
+ # the Python standard library.
22
+ OLD_SYS_PATH = sys.path[:]
23
+ sys.path = [p for p in sys.path if p and "nltk" not in str(p)]
24
+ import inspect
25
+
26
+ sys.path = OLD_SYS_PATH
27
+
28
+
29
+ def __legacysignature(signature):
30
+ """
31
+ For backward-compatibility reasons, we don't use a standard Signature.
32
+ Instead, we use the string generated by this method.
33
+ Basically, from a Signature we create a string and remove the default values.
34
+ """
35
+ listsignature = str(signature)[1:-1].split(",")
36
+ for counter, param in enumerate(listsignature):
37
+ if param.count("=") > 0:
38
+ listsignature[counter] = param[0 : param.index("=")].strip()
39
+ else:
40
+ listsignature[counter] = param.strip()
41
+ return ", ".join(listsignature)
42
+
43
+
44
+ def getinfo(func):
45
+ """
46
+ Returns an info dictionary containing:
47
+ - name (the name of the function : str)
48
+ - argnames (the names of the arguments : list)
49
+ - defaults (the values of the default arguments : tuple)
50
+ - signature (the signature : str)
51
+ - fullsignature (the full signature : Signature)
52
+ - doc (the docstring : str)
53
+ - module (the module name : str)
54
+ - dict (the function __dict__ : str)
55
+
56
+ >>> def f(self, x=1, y=2, *args, **kw): pass
57
+
58
+ >>> info = getinfo(f)
59
+
60
+ >>> info["name"]
61
+ 'f'
62
+ >>> info["argnames"]
63
+ ['self', 'x', 'y', 'args', 'kw']
64
+
65
+ >>> info["defaults"]
66
+ (1, 2)
67
+
68
+ >>> info["signature"]
69
+ 'self, x, y, *args, **kw'
70
+
71
+ >>> info["fullsignature"]
72
+ <Signature (self, x=1, y=2, *args, **kw)>
73
+ """
74
+ assert inspect.ismethod(func) or inspect.isfunction(func)
75
+ argspec = inspect.getfullargspec(func)
76
+ regargs, varargs, varkwargs = argspec[:3]
77
+ argnames = list(regargs)
78
+ if varargs:
79
+ argnames.append(varargs)
80
+ if varkwargs:
81
+ argnames.append(varkwargs)
82
+ fullsignature = inspect.signature(func)
83
+ # Convert Signature to str
84
+ signature = __legacysignature(fullsignature)
85
+
86
+ # pypy compatibility
87
+ if hasattr(func, "__closure__"):
88
+ _closure = func.__closure__
89
+ _globals = func.__globals__
90
+ else:
91
+ _closure = func.func_closure
92
+ _globals = func.func_globals
93
+
94
+ return dict(
95
+ name=func.__name__,
96
+ argnames=argnames,
97
+ signature=signature,
98
+ fullsignature=fullsignature,
99
+ defaults=func.__defaults__,
100
+ doc=func.__doc__,
101
+ module=func.__module__,
102
+ dict=func.__dict__,
103
+ globals=_globals,
104
+ closure=_closure,
105
+ )
106
+
107
+
108
+ def update_wrapper(wrapper, model, infodict=None):
109
+ "akin to functools.update_wrapper"
110
+ infodict = infodict or getinfo(model)
111
+ wrapper.__name__ = infodict["name"]
112
+ wrapper.__doc__ = infodict["doc"]
113
+ wrapper.__module__ = infodict["module"]
114
+ wrapper.__dict__.update(infodict["dict"])
115
+ wrapper.__defaults__ = infodict["defaults"]
116
+ wrapper.undecorated = model
117
+ return wrapper
118
+
119
+
120
+ def new_wrapper(wrapper, model):
121
+ """
122
+ An improvement over functools.update_wrapper. The wrapper is a generic
123
+ callable object. It works by generating a copy of the wrapper with the
124
+ right signature and by updating the copy, not the original.
125
+ Moreover, 'model' can be a dictionary with keys 'name', 'doc', 'module',
126
+ 'dict', 'defaults'.
127
+ """
128
+ if isinstance(model, dict):
129
+ infodict = model
130
+ else: # assume model is a function
131
+ infodict = getinfo(model)
132
+ assert (
133
+ not "_wrapper_" in infodict["argnames"]
134
+ ), '"_wrapper_" is a reserved argument name!'
135
+ src = "lambda %(signature)s: _wrapper_(%(signature)s)" % infodict
136
+ funcopy = eval(src, dict(_wrapper_=wrapper))
137
+ return update_wrapper(funcopy, model, infodict)
138
+
139
+
140
+ # helper used in decorator_factory
141
+ def __call__(self, func):
142
+ return new_wrapper(lambda *a, **k: self.call(func, *a, **k), func)
143
+
144
+
145
+ def decorator_factory(cls):
146
+ """
147
+ Take a class with a ``.call`` method and return a callable decorator
148
+ object. It works by adding a suitable __call__ method to the class;
149
+ it raises a TypeError if the class already has a nontrivial __call__
150
+ method.
151
+ """
152
+ attrs = set(dir(cls))
153
+ if "__call__" in attrs:
154
+ raise TypeError(
155
+ "You cannot decorate a class with a nontrivial " "__call__ method"
156
+ )
157
+ if "call" not in attrs:
158
+ raise TypeError("You cannot decorate a class without a " ".call method")
159
+ cls.__call__ = __call__
160
+ return cls
161
+
162
+
163
+ def decorator(caller):
164
+ """
165
+ General purpose decorator factory: takes a caller function as
166
+ input and returns a decorator with the same attributes.
167
+ A caller function is any function like this::
168
+
169
+ def caller(func, *args, **kw):
170
+ # do something
171
+ return func(*args, **kw)
172
+
173
+ Here is an example of usage:
174
+
175
+ >>> @decorator
176
+ ... def chatty(f, *args, **kw):
177
+ ... print("Calling %r" % f.__name__)
178
+ ... return f(*args, **kw)
179
+
180
+ >>> chatty.__name__
181
+ 'chatty'
182
+
183
+ >>> @chatty
184
+ ... def f(): pass
185
+ ...
186
+ >>> f()
187
+ Calling 'f'
188
+
189
+ decorator can also take as input a class with a .call method; in this
190
+ case it converts the class into a factory of callable decorator objects.
191
+ See the documentation for an example.
192
+ """
193
+ if inspect.isclass(caller):
194
+ return decorator_factory(caller)
195
+
196
+ def _decorator(func): # the real meat is here
197
+ infodict = getinfo(func)
198
+ argnames = infodict["argnames"]
199
+ assert not (
200
+ "_call_" in argnames or "_func_" in argnames
201
+ ), "You cannot use _call_ or _func_ as argument names!"
202
+ src = "lambda %(signature)s: _call_(_func_, %(signature)s)" % infodict
203
+ # import sys; print >> sys.stderr, src # for debugging purposes
204
+ dec_func = eval(src, dict(_func_=func, _call_=caller))
205
+ return update_wrapper(dec_func, func, infodict)
206
+
207
+ return update_wrapper(_decorator, caller)
208
+
209
+
210
+ def getattr_(obj, name, default_thunk):
211
+ "Similar to .setdefault in dictionaries."
212
+ try:
213
+ return getattr(obj, name)
214
+ except AttributeError:
215
+ default = default_thunk()
216
+ setattr(obj, name, default)
217
+ return default
218
+
219
+
220
+ @decorator
221
+ def memoize(func, *args):
222
+ dic = getattr_(func, "memoize_dic", dict)
223
+ # memoize_dic is created at the first call
224
+ if args in dic:
225
+ return dic[args]
226
+ result = func(*args)
227
+ dic[args] = result
228
+ return result
229
+
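+ # A memoize usage sketch (illustrative only). The cache is keyed on
+ # positional arguments and lives on the undecorated function object as
+ # ``memoize_dic``, created at the first call:
+ #
+ # @memoize
+ # def fib(n):
+ #     return n if n < 2 else fib(n - 1) + fib(n - 2)
+ #
+ # fib(30)                      # runs in linear time thanks to the cache
+ # fib.undecorated.memoize_dic  # {(0,): 0, (1,): 1, (2,): 1, ...}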
230
+
231
+ ########################## LEGALESE ###############################
232
+
233
+ ## Redistributions of source code must retain the above copyright
234
+ ## notice, this list of conditions and the following disclaimer.
235
+ ## Redistributions in bytecode form must reproduce the above copyright
236
+ ## notice, this list of conditions and the following disclaimer in
237
+ ## the documentation and/or other materials provided with the
238
+ ## distribution.
239
+
240
+ ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
241
+ ## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
242
+ ## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
243
+ ## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
244
+ ## HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
245
+ ## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
246
+ ## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
247
+ ## OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
248
+ ## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
249
+ ## TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
250
+ ## USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
251
+ ## DAMAGE.
env-llmeval/lib/python3.10/site-packages/nltk/downloader.py ADDED
@@ -0,0 +1,2559 @@
1
+ # Natural Language Toolkit: Corpus & Model Downloader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ The NLTK corpus and module downloader. This module defines several
10
+ interfaces which can be used to download corpora, models, and other
11
+ data packages that can be used with NLTK.
12
+
13
+ Downloading Packages
14
+ ====================
15
+ If called with no arguments, ``download()`` will display an interactive
16
+ interface which can be used to download and install new packages.
17
+ If Tkinter is available, then a graphical interface will be shown,
18
+ otherwise a simple text interface will be provided.
19
+
20
+ Individual packages can be downloaded by calling the ``download()``
21
+ function with a single argument, giving the package identifier for the
22
+ package that should be downloaded:
23
+
24
+ >>> download('treebank') # doctest: +SKIP
25
+ [nltk_data] Downloading package 'treebank'...
26
+ [nltk_data] Unzipping corpora/treebank.zip.
27
+
28
+ NLTK also provides a number of "package collections", consisting of
29
+ a group of related packages. To download all packages in a
30
+ collection, simply call ``download()`` with the collection's
31
+ identifier:
32
+
33
+ >>> download('all-corpora') # doctest: +SKIP
34
+ [nltk_data] Downloading package 'abc'...
35
+ [nltk_data] Unzipping corpora/abc.zip.
36
+ [nltk_data] Downloading package 'alpino'...
37
+ [nltk_data] Unzipping corpora/alpino.zip.
38
+ ...
39
+ [nltk_data] Downloading package 'words'...
40
+ [nltk_data] Unzipping corpora/words.zip.
41
+
42
+ Download Directory
43
+ ==================
44
+ By default, packages are installed in either a system-wide directory
45
+ (if Python has sufficient access to write to it), or in the current
46
+ user's home directory. However, the ``download_dir`` argument may be
47
+ used to specify a different installation target, if desired.
48
+
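+ For example, to install a package into a specific writable directory
+ (the target path below is illustrative):
+
+ >>> download('treebank', download_dir='/tmp/nltk_data') # doctest: +SKIP
+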
49
+ See ``Downloader.default_download_dir()`` for a more detailed
50
+ description of how the default download directory is chosen.
51
+
52
+ NLTK Download Server
53
+ ====================
54
+ Before downloading any packages, the corpus and module downloader
55
+ contacts the NLTK download server to retrieve an index file
56
+ describing the available packages. By default, this index file is
57
+ loaded from ``https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml``.
58
+ If necessary, it is possible to create a new ``Downloader`` object,
59
+ specifying a different URL for the package index file.
60
+
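+ For example, a sketch pointing at a hypothetical mirror of the index:
+
+ >>> d = Downloader(server_index_url='https://example.org/index.xml') # doctest: +SKIP
+ >>> d.download('treebank') # doctest: +SKIP
+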
61
+ Usage::
62
+
63
+ python nltk/downloader.py [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS
64
+
65
+ or::
66
+
67
+ python -m nltk.downloader [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS
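+
+ For example, to fetch the ``treebank`` package into a custom directory::
+
+ python -m nltk.downloader -d /tmp/nltk_data treebank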
68
+ """
69
+ # ----------------------------------------------------------------------
70
+
71
+ """
72
+
73
+ 0 1 2 3
74
+ [label][----][label][----]
75
+ [column ][column ]
76
+
77
+ Notes
78
+ =====
79
+ Handling data files. Some questions:
80
+
81
+ * Should the data files be kept zipped or unzipped? I say zipped.
82
+
83
+ * Should the data files be kept in svn at all? Advantages: history;
84
+ automatic version numbers; 'svn up' could be used rather than the
85
+ downloader to update the corpora. Disadvantages: they're big,
86
+ which makes working from svn a bit of a pain. And we're planning
87
+ to potentially make them much bigger. I don't think we want
88
+ people to have to download 400MB corpora just to use nltk from svn.
89
+
90
+ * Compromise: keep the data files in trunk/data rather than in
91
+ trunk/nltk. That way you can check them out in svn if you want
92
+ to; but you don't need to, and you can use the downloader instead.
93
+
94
+ * Also: keep models in mind. When we change the code, we'd
95
+ potentially like the models to get updated. This could require a
96
+ little thought.
97
+
98
+ * So.. let's assume we have a trunk/data directory, containing a bunch
99
+ of packages. The packages should be kept as zip files, because we
100
+ really shouldn't be editing them much (well -- we may edit models
101
+ more, but they tend to be binary-ish files anyway, where diffs
102
+ aren't that helpful). So we'll have trunk/data, with a bunch of
103
+ files like abc.zip and treebank.zip and propbank.zip. For each
104
+ package we could also have eg treebank.xml and propbank.xml,
105
+ describing the contents of the package (name, copyright, license,
106
+ etc). Collections would also have .xml files. Finally, we would
107
+ pull all these together to form a single index.xml file. Some
108
+ directory structure wouldn't hurt. So how about::
109
+
110
+ /trunk/data/ ....................... root of data svn
111
+ index.xml ........................ main index file
112
+ src/ ............................. python scripts
113
+ packages/ ........................ dir for packages
114
+ corpora/ ....................... zip & xml files for corpora
115
+ grammars/ ...................... zip & xml files for grammars
116
+ taggers/ ....................... zip & xml files for taggers
117
+ tokenizers/ .................... zip & xml files for tokenizers
118
+ etc.
119
+ collections/ ..................... xml files for collections
120
+
121
+ Where the root (/trunk/data) would contain a makefile; and src/
122
+ would contain a script to update the info.xml file. It could also
123
+ contain scripts to rebuild some of the various model files. The
124
+ script that builds index.xml should probably check that each zip
125
+ file expands entirely into a single subdir, whose name matches the
126
+ package's uid.
127
+
128
+ Changes I need to make:
129
+ - in index: change "size" to "filesize" or "compressed-size"
130
+ - in index: add "unzipped-size"
131
+ - when checking status: check both compressed & uncompressed size.
132
+ uncompressed size is important to make sure we detect a problem
133
+ if something got partially unzipped. define new status values
134
+ to differentiate stale vs corrupt vs corruptly-uncompressed??
135
+ (we shouldn't need to re-download the file if the zip file is ok
136
+ but it didn't get uncompressed fully.)
137
+ - add other fields to the index: author, license, copyright, contact,
138
+ etc.
139
+
140
+ the current grammars/ package would become a single new package (eg
141
+ toy-grammars or book-grammars).
142
+
143
+ xml file should have:
144
+ - authorship info
145
+ - license info
146
+ - copyright info
147
+ - contact info
148
+ - info about what type of data/annotation it contains?
149
+ - recommended corpus reader?
150
+
151
+ collections can contain other collections. they can also contain
152
+ multiple package types (corpora & models). Have a single 'basics'
153
+ package that includes everything we talk about in the book?
154
+
155
+ n.b.: there will have to be a fallback to the punkt tokenizer, in case
156
+ they didn't download that model.
157
+
158
+ default: unzip or not?
159
+
160
+ """
161
+ import functools
162
+ import itertools
163
+ import os
164
+ import shutil
165
+ import subprocess
166
+ import sys
167
+ import textwrap
168
+ import threading
169
+ import time
170
+ import warnings
171
+ import zipfile
172
+ from hashlib import md5
173
+ from xml.etree import ElementTree
174
+
175
+ try:
176
+ TKINTER = True
177
+ from tkinter import Button, Canvas, Entry, Frame, IntVar, Label, Menu, TclError, Tk
178
+ from tkinter.messagebox import showerror
179
+
180
+ from nltk.draw.table import Table
181
+ from nltk.draw.util import ShowText
182
+ except ImportError:
183
+ TKINTER = False
184
+ TclError = ValueError
185
+
186
+ from urllib.error import HTTPError, URLError
187
+ from urllib.request import urlopen
188
+
189
+ import nltk
190
+
191
+ # urllib2 = nltk.internals.import_from_stdlib('urllib2')
192
+
193
+
194
+ ######################################################################
195
+ # Directory entry objects (from the data server's index file)
196
+ ######################################################################
197
+
198
+
199
+ class Package:
200
+ """
201
+ A directory entry for a downloadable package. These entries are
202
+ extracted from the XML index file that is downloaded by
203
+ ``Downloader``. Each package consists of a single file; but if
204
+ that file is a zip file, then it can be automatically decompressed
205
+ when the package is installed.
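+
+ For example, a sketch built from an illustrative index entry (the id,
+ URL, and sizes below are stand-ins, not a real index record)::
+
+ >>> from xml.etree import ElementTree
+ >>> elt = ElementTree.fromstring(
+ ...     '<package id="abc" url="https://host/abc.zip"'
+ ...     ' size="10" unzipped_size="20" unzip="1"/>')
+ >>> Package.fromxml(elt)
+ <Package abc>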
206
+ """
207
+
208
+ def __init__(
209
+ self,
210
+ id,
211
+ url,
212
+ name=None,
213
+ subdir="",
214
+ size=None,
215
+ unzipped_size=None,
216
+ checksum=None,
217
+ svn_revision=None,
218
+ copyright="Unknown",
219
+ contact="Unknown",
220
+ license="Unknown",
221
+ author="Unknown",
222
+ unzip=True,
223
+ **kw,
224
+ ):
225
+ self.id = id
226
+ """A unique identifier for this package."""
227
+
228
+ self.name = name or id
229
+ """A string name for this package."""
230
+
231
+ self.subdir = subdir
232
+ """The subdirectory where this package should be installed.
233
+ E.g., ``'corpora'`` or ``'taggers'``."""
234
+
235
+ self.url = url
236
+ """A URL that can be used to download this package's file."""
237
+
238
+ self.size = int(size)
239
+ """The filesize (in bytes) of the package file."""
240
+
241
+ self.unzipped_size = int(unzipped_size)
242
+ """The total filesize of the files contained in the package's
243
+ zipfile."""
244
+
245
+ self.checksum = checksum
246
+ """The MD-5 checksum of the package file."""
247
+
248
+ self.svn_revision = svn_revision
249
+ """A subversion revision number for this package."""
250
+
251
+ self.copyright = copyright
252
+ """Copyright holder for this package."""
253
+
254
+ self.contact = contact
255
+ """Name & email of the person who should be contacted with
256
+ questions about this package."""
257
+
258
+ self.license = license
259
+ """License information for this package."""
260
+
261
+ self.author = author
262
+ """Author of this package."""
263
+
264
+ ext = os.path.splitext(url.split("/")[-1])[1]
265
+ self.filename = os.path.join(subdir, id + ext)
266
+ """The filename that should be used for this package's file. It
267
+ is formed by joining ``self.subdir`` with ``self.id``, and
268
+ using the same extension as ``url``."""
269
+
270
+ self.unzip = bool(int(unzip)) # '0' or '1'
271
+ """A flag indicating whether this corpus should be unzipped by
272
+ default."""
273
+
274
+ # Include any other attributes provided by the XML file.
275
+ self.__dict__.update(kw)
276
+
277
+ @staticmethod
278
+ def fromxml(xml):
279
+ if isinstance(xml, str):
280
+ xml = ElementTree.parse(xml)
281
+ for key in xml.attrib:
282
+ xml.attrib[key] = str(xml.attrib[key])
283
+ return Package(**xml.attrib)
284
+
285
+ def __lt__(self, other):
286
+ return self.id < other.id
287
+
288
+ def __repr__(self):
289
+ return "<Package %s>" % self.id
290
+
291
+
292
+ class Collection:
293
+ """
294
+ A directory entry for a collection of downloadable packages.
295
+ These entries are extracted from the XML index file that is
296
+ downloaded by ``Downloader``.
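+
+ For example, a sketch with illustrative identifiers; member packages
+ appear as ``<item ref="..."/>`` children in the index entry::
+
+ >>> from xml.etree import ElementTree
+ >>> elt = ElementTree.fromstring(
+ ...     '<collection id="mystuff">'
+ ...     '<item ref="abc"/><item ref="treebank"/></collection>')
+ >>> Collection.fromxml(elt).children
+ ['abc', 'treebank']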
297
+ """
298
+
299
+ def __init__(self, id, children, name=None, **kw):
300
+ self.id = id
301
+ """A unique identifier for this collection."""
302
+
303
+ self.name = name or id
304
+ """A string name for this collection."""
305
+
306
+ self.children = children
307
+ """A list of the ``Collections`` or ``Packages`` directly
308
+ contained by this collection."""
309
+
310
+ self.packages = None
311
+ """A list of ``Packages`` contained by this collection or any
312
+ collections it recursively contains."""
313
+
314
+ # Include any other attributes provided by the XML file.
315
+ self.__dict__.update(kw)
316
+
317
+ @staticmethod
318
+ def fromxml(xml):
319
+ if isinstance(xml, str):
320
+ xml = ElementTree.parse(xml)
321
+ for key in xml.attrib:
322
+ xml.attrib[key] = str(xml.attrib[key])
323
+ children = [child.get("ref") for child in xml.findall("item")]
324
+ return Collection(children=children, **xml.attrib)
325
+
326
+ def __lt__(self, other):
327
+ return self.id < other.id
328
+
329
+ def __repr__(self):
330
+ return "<Collection %s>" % self.id
331
+
332
+
333
+ ######################################################################
334
+ # Message Passing Objects
335
+ ######################################################################
336
+
337
+
338
+ class DownloaderMessage:
339
+ """A status message object, used by ``incr_download`` to
340
+ communicate its progress."""
341
+
342
+
343
+ class StartCollectionMessage(DownloaderMessage):
344
+ """Data server has started working on a collection of packages."""
345
+
346
+ def __init__(self, collection):
347
+ self.collection = collection
348
+
349
+
350
+ class FinishCollectionMessage(DownloaderMessage):
351
+ """Data server has finished working on a collection of packages."""
352
+
353
+ def __init__(self, collection):
354
+ self.collection = collection
355
+
356
+
357
+ class StartPackageMessage(DownloaderMessage):
358
+ """Data server has started working on a package."""
359
+
360
+ def __init__(self, package):
361
+ self.package = package
362
+
363
+
364
+ class FinishPackageMessage(DownloaderMessage):
365
+ """Data server has finished working on a package."""
366
+
367
+ def __init__(self, package):
368
+ self.package = package
369
+
370
+
371
+ class StartDownloadMessage(DownloaderMessage):
372
+ """Data server has started downloading a package."""
373
+
374
+ def __init__(self, package):
375
+ self.package = package
376
+
377
+
378
+ class FinishDownloadMessage(DownloaderMessage):
379
+ """Data server has finished downloading a package."""
380
+
381
+ def __init__(self, package):
382
+ self.package = package
383
+
384
+
385
+ class StartUnzipMessage(DownloaderMessage):
386
+ """Data server has started unzipping a package."""
387
+
388
+ def __init__(self, package):
389
+ self.package = package
390
+
391
+
392
+ class FinishUnzipMessage(DownloaderMessage):
393
+ """Data server has finished unzipping a package."""
394
+
395
+ def __init__(self, package):
396
+ self.package = package
397
+
398
+
399
+ class UpToDateMessage(DownloaderMessage):
400
+ """The package download file is already up-to-date"""
401
+
402
+ def __init__(self, package):
403
+ self.package = package
404
+
405
+
406
+ class StaleMessage(DownloaderMessage):
407
+ """The package download file is out-of-date or corrupt"""
408
+
409
+ def __init__(self, package):
410
+ self.package = package
411
+
412
+
413
+ class ErrorMessage(DownloaderMessage):
414
+ """Data server encountered an error"""
415
+
416
+ def __init__(self, package, message):
417
+ self.package = package
418
+ if isinstance(message, Exception):
419
+ self.message = str(message)
420
+ else:
421
+ self.message = message
422
+
423
+
424
+ class ProgressMessage(DownloaderMessage):
425
+ """Indicates how much progress the data server has made"""
426
+
427
+ def __init__(self, progress):
428
+ self.progress = progress
429
+
430
+
431
+ class SelectDownloadDirMessage(DownloaderMessage):
432
+ """Indicates what download directory the data server is using"""
433
+
434
+ def __init__(self, download_dir):
435
+ self.download_dir = download_dir
436
+
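+ # Example (a sketch): consuming the message stream produced by
+ # ``Downloader.incr_download``; 'treebank' is an illustrative package id:
+ #
+ # d = Downloader()
+ # for msg in d.incr_download('treebank'):
+ #     if isinstance(msg, ProgressMessage):
+ #         print('%d%% complete' % msg.progress)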
437
+
438
+ ######################################################################
439
+ # NLTK Data Server
440
+ ######################################################################
441
+
442
+
443
+ class Downloader:
444
+ """
445
+ A class used to access the NLTK data server, which can be used to
446
+ download corpora and other data packages.
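+
+ For example (a sketch)::
+
+ >>> d = Downloader()
+ >>> d.download('treebank') # doctest: +SKIP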
447
+ """
448
+
449
+ # /////////////////////////////////////////////////////////////////
450
+ # Configuration
451
+ # /////////////////////////////////////////////////////////////////
452
+
453
+ INDEX_TIMEOUT = 60 * 60 # 1 hour
454
+ """The amount of time after which the cached copy of the data
455
+ server index will be considered 'stale,' and will be
456
+ re-downloaded."""
457
+
458
+ DEFAULT_URL = "https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml"
459
+ """The default URL for the NLTK data server's index. An
460
+ alternative URL can be specified when creating a new
461
+ ``Downloader`` object."""
462
+
463
+ # /////////////////////////////////////////////////////////////////
464
+ # Status Constants
465
+ # /////////////////////////////////////////////////////////////////
466
+
467
+ INSTALLED = "installed"
468
+ """A status string indicating that a package or collection is
469
+ installed and up-to-date."""
470
+ NOT_INSTALLED = "not installed"
471
+ """A status string indicating that a package or collection is
472
+ not installed."""
473
+ STALE = "out of date"
474
+ """A status string indicating that a package or collection is
475
+ corrupt or out-of-date."""
476
+ PARTIAL = "partial"
477
+ """A status string indicating that a collection is partially
478
+ installed (i.e., only some of its packages are installed)."""
479
+
480
+ # /////////////////////////////////////////////////////////////////
481
+ # Constructor
482
+ # /////////////////////////////////////////////////////////////////
483
+
484
+ def __init__(self, server_index_url=None, download_dir=None):
485
+ self._url = server_index_url or self.DEFAULT_URL
486
+ """The URL for the data server's index file."""
487
+
488
+ self._collections = {}
489
+ """Dictionary from collection identifier to ``Collection``"""
490
+
491
+ self._packages = {}
492
+ """Dictionary from package identifier to ``Package``"""
493
+
494
+ self._download_dir = download_dir
495
+ """The default directory to which packages will be downloaded."""
496
+
497
+ self._index = None
498
+ """The XML index file downloaded from the data server"""
499
+
500
+ self._index_timestamp = None
501
+ """Time at which ``self._index`` was downloaded. If it is more
502
+ than ``INDEX_TIMEOUT`` seconds old, it will be re-downloaded."""
503
+
504
+ self._status_cache = {}
505
+ """Dictionary from package/collection identifier to status
506
+ string (``INSTALLED``, ``NOT_INSTALLED``, ``STALE``, or
507
+ ``PARTIAL``). Cache is used for packages only, not
508
+ collections."""
509
+
510
+ self._errors = None
511
+ """Flag for telling if all packages got successfully downloaded or not."""
512
+
513
+ # decide where we're going to save things to.
514
+ if self._download_dir is None:
515
+ self._download_dir = self.default_download_dir()
516
+
517
+ # /////////////////////////////////////////////////////////////////
518
+ # Information
519
+ # /////////////////////////////////////////////////////////////////
520
+
521
+ def list(
522
+ self,
523
+ download_dir=None,
524
+ show_packages=True,
525
+ show_collections=True,
526
+ header=True,
527
+ more_prompt=False,
528
+ skip_installed=False,
529
+ ):
530
+ lines = 0 # for more_prompt
531
+ if download_dir is None:
532
+ download_dir = self._download_dir
533
+ print("Using default data directory (%s)" % download_dir)
534
+ if header:
535
+ print("=" * (26 + len(self._url)))
536
+ print(" Data server index for <%s>" % self._url)
537
+ print("=" * (26 + len(self._url)))
538
+ lines += 3 # for more_prompt
539
+ stale = partial = False
540
+
541
+ categories = []
542
+ if show_packages:
543
+ categories.append("packages")
544
+ if show_collections:
545
+ categories.append("collections")
546
+ for category in categories:
547
+ print("%s:" % category.capitalize())
548
+ lines += 1 # for more_prompt
549
+ for info in sorted(getattr(self, category)(), key=str):
550
+ status = self.status(info, download_dir)
551
+ if status == self.INSTALLED and skip_installed:
552
+ continue
553
+ if status == self.STALE:
554
+ stale = True
555
+ if status == self.PARTIAL:
556
+ partial = True
557
+ prefix = {
558
+ self.INSTALLED: "*",
559
+ self.STALE: "-",
560
+ self.PARTIAL: "P",
561
+ self.NOT_INSTALLED: " ",
562
+ }[status]
563
+ name = textwrap.fill(
564
+ "-" * 27 + (info.name or info.id), 75, subsequent_indent=27 * " "
565
+ )[27:]
566
+ print(" [{}] {} {}".format(prefix, info.id.ljust(20, "."), name))
567
+ lines += len(name.split("\n")) # for more_prompt
568
+ if more_prompt and lines > 20:
569
+ user_input = input("Hit Enter to continue: ")
570
+ if user_input.lower() in ("x", "q"):
571
+ return
572
+ lines = 0
573
+ print()
574
+ msg = "([*] marks installed packages"
575
+ if stale:
576
+ msg += "; [-] marks out-of-date or corrupt packages"
577
+ if partial:
578
+ msg += "; [P] marks partially installed collections"
579
+ print(textwrap.fill(msg + ")", subsequent_indent=" ", width=76))
580
+
581
+ def packages(self):
582
+ self._update_index()
583
+ return self._packages.values()
584
+
585
+ def corpora(self):
586
+ self._update_index()
587
+ return [pkg for (id, pkg) in self._packages.items() if pkg.subdir == "corpora"]
588
+
589
+ def models(self):
590
+ self._update_index()
591
+ return [pkg for (id, pkg) in self._packages.items() if pkg.subdir != "corpora"]
592
+
593
+ def collections(self):
594
+ self._update_index()
595
+ return self._collections.values()
596
+
597
+ # /////////////////////////////////////////////////////////////////
598
+ # Downloading
599
+ # /////////////////////////////////////////////////////////////////
600
+
601
+ def _info_or_id(self, info_or_id):
602
+ if isinstance(info_or_id, str):
603
+ return self.info(info_or_id)
604
+ else:
605
+ return info_or_id
606
+
607
+ # [xx] When during downloading is it 'safe' to abort? Only unsafe
608
+ # time is *during* an unzip -- we don't want to leave a
609
+ # partially-unzipped corpus in place because we wouldn't notice
610
+ # it. But if we had the exact total size of the unzipped corpus,
611
+ # then that would be fine. Then we could abort anytime we want!
612
+ # So this is really what we should do. That way the threaded
613
+ # downloader in the gui can just kill the download thread anytime
614
+ # it wants.
615
+
616
+ def incr_download(self, info_or_id, download_dir=None, force=False):
617
+ # If they didn't specify a download_dir, then use the default one.
618
+ if download_dir is None:
619
+ download_dir = self._download_dir
620
+ yield SelectDownloadDirMessage(download_dir)
621
+
622
+ # If they gave us a list of ids, then download each one.
623
+ if isinstance(info_or_id, (list, tuple)):
624
+ yield from self._download_list(info_or_id, download_dir, force)
625
+ return
626
+
627
+ # Look up the requested collection or package.
628
+ try:
629
+ info = self._info_or_id(info_or_id)
630
+ except (OSError, ValueError) as e:
631
+ yield ErrorMessage(None, f"Error loading {info_or_id}: {e}")
632
+ return
633
+
634
+ # Handle collections.
635
+ if isinstance(info, Collection):
636
+ yield StartCollectionMessage(info)
637
+ yield from self.incr_download(info.children, download_dir, force)
638
+ yield FinishCollectionMessage(info)
639
+
640
+ # Handle Packages (delegate to a helper function).
641
+ else:
642
+ yield from self._download_package(info, download_dir, force)
643
+
644
+ def _num_packages(self, item):
645
+ if isinstance(item, Package):
646
+ return 1
647
+ else:
648
+ return len(item.packages)
649
+
650
+ def _download_list(self, items, download_dir, force):
651
+ # Look up the requested items.
652
+ for i in range(len(items)):
653
+ try:
654
+ items[i] = self._info_or_id(items[i])
655
+ except (OSError, ValueError) as e:
656
+ yield ErrorMessage(items[i], e)
657
+ return
658
+
659
+ # Download each item, re-scaling their progress.
660
+ num_packages = sum(self._num_packages(item) for item in items)
661
+ progress = 0
662
+ for i, item in enumerate(items):
663
+ if isinstance(item, Package):
664
+ delta = 1.0 / num_packages
665
+ else:
666
+ delta = len(item.packages) / num_packages
667
+ for msg in self.incr_download(item, download_dir, force):
668
+ if isinstance(msg, ProgressMessage):
669
+ yield ProgressMessage(progress + msg.progress * delta)
670
+ else:
671
+ yield msg
672
+
673
+ progress += 100 * delta
674
+
675
+ def _download_package(self, info, download_dir, force):
676
+ yield StartPackageMessage(info)
677
+ yield ProgressMessage(0)
678
+
679
+ # Do we already have the current version?
680
+ status = self.status(info, download_dir)
681
+ if not force and status == self.INSTALLED:
682
+ yield UpToDateMessage(info)
683
+ yield ProgressMessage(100)
684
+ yield FinishPackageMessage(info)
685
+ return
686
+
687
+ # Remove the package from our status cache
688
+ self._status_cache.pop(info.id, None)
689
+
690
+ # Check for (and remove) any old/stale version.
691
+ filepath = os.path.join(download_dir, info.filename)
692
+ if os.path.exists(filepath):
693
+ if status == self.STALE:
694
+ yield StaleMessage(info)
695
+ os.remove(filepath)
696
+
697
+ # Ensure the download_dir exists
698
+ if not os.path.exists(download_dir):
699
+ os.makedirs(download_dir)
700
+ if not os.path.exists(os.path.join(download_dir, info.subdir)):
701
+ os.makedirs(os.path.join(download_dir, info.subdir))
702
+
703
+ # Download the file. This will raise an IOError if the url
704
+ # is not found.
705
+ yield StartDownloadMessage(info)
706
+ yield ProgressMessage(5)
707
+ try:
708
+ infile = urlopen(info.url)
709
+ with open(filepath, "wb") as outfile:
710
+ num_blocks = max(1, info.size / (1024 * 16))
711
+ for block in itertools.count():
712
+ s = infile.read(1024 * 16) # 16k blocks.
713
+ outfile.write(s)
714
+ if not s:
715
+ break
716
+ if block % 2 == 0: # how often?
717
+ yield ProgressMessage(min(80, 5 + 75 * (block / num_blocks)))
718
+ infile.close()
719
+ except OSError as e:
720
+ yield ErrorMessage(
721
+ info,
722
+ "Error downloading %r from <%s>:" "\n %s" % (info.id, info.url, e),
723
+ )
724
+ return
725
+ yield FinishDownloadMessage(info)
726
+ yield ProgressMessage(80)
727
+
728
+ # If it's a zipfile, uncompress it.
729
+ if info.filename.endswith(".zip"):
730
+ zipdir = os.path.join(download_dir, info.subdir)
731
+ # Unzip if we're unzipping by default; *or* if it's already
732
+ # been unzipped (presumably a previous version).
733
+ if info.unzip or os.path.exists(os.path.join(zipdir, info.id)):
734
+ yield StartUnzipMessage(info)
735
+ for msg in _unzip_iter(filepath, zipdir, verbose=False):
736
+ # Somewhat of a hack, but we need a proper package reference
737
+ msg.package = info
738
+ yield msg
739
+ yield FinishUnzipMessage(info)
740
+
741
+ yield FinishPackageMessage(info)
742
+
743
+ def download(
744
+ self,
745
+ info_or_id=None,
746
+ download_dir=None,
747
+ quiet=False,
748
+ force=False,
749
+ prefix="[nltk_data] ",
750
+ halt_on_error=True,
751
+ raise_on_error=False,
752
+ print_error_to=sys.stderr,
753
+ ):
754
+
755
+ print_to = functools.partial(print, file=print_error_to)
756
+ # If no info or id is given, then use the interactive shell.
757
+ if info_or_id is None:
758
+ # [xx] hmm -- changing self._download_dir here seems like
759
+ # the wrong thing to do. Maybe the _interactive_download
760
+ # function should make a new copy of self to use?
761
+ if download_dir is not None:
762
+ self._download_dir = download_dir
763
+ self._interactive_download()
764
+ return True
765
+
766
+ else:
767
+ # Define a helper function for displaying output:
768
+ def show(s, prefix2=""):
769
+ print_to(
770
+ textwrap.fill(
771
+ s,
772
+ initial_indent=prefix + prefix2,
773
+ subsequent_indent=prefix + prefix2 + " " * 4,
774
+ )
775
+ )
776
+
777
+ for msg in self.incr_download(info_or_id, download_dir, force):
778
+ # Error messages
779
+ if isinstance(msg, ErrorMessage):
780
+ show(msg.message)
781
+ if raise_on_error:
782
+ raise ValueError(msg.message)
783
+ if halt_on_error:
784
+ return False
785
+ self._errors = True
786
+ if not quiet:
787
+ print_to("Error installing package. Retry? [n/y/e]")
788
+ choice = input().strip()
789
+ if choice in ["y", "Y"]:
790
+ if not self.download(
791
+ msg.package.id,
792
+ download_dir,
793
+ quiet,
794
+ force,
795
+ prefix,
796
+ halt_on_error,
797
+ raise_on_error,
798
+ ):
799
+ return False
800
+ elif choice in ["e", "E"]:
801
+ return False
802
+
803
+ # All other messages
804
+ if not quiet:
805
+ # Collection downloading messages:
806
+ if isinstance(msg, StartCollectionMessage):
807
+ show("Downloading collection %r" % msg.collection.id)
808
+ prefix += " | "
809
+ print_to(prefix)
810
+ elif isinstance(msg, FinishCollectionMessage):
811
+ print_to(prefix)
812
+ prefix = prefix[:-4]
813
+ if self._errors:
814
+ show(
815
+ "Downloaded collection %r with errors"
816
+ % msg.collection.id
817
+ )
818
+ else:
819
+ show("Done downloading collection %s" % msg.collection.id)
820
+
821
+ # Package downloading messages:
822
+ elif isinstance(msg, StartPackageMessage):
823
+ show(
824
+ "Downloading package %s to %s..."
825
+ % (msg.package.id, download_dir)
826
+ )
827
+ elif isinstance(msg, UpToDateMessage):
828
+ show("Package %s is already up-to-date!" % msg.package.id, " ")
829
+ # elif isinstance(msg, StaleMessage):
830
+ # show('Package %s is out-of-date or corrupt' %
831
+ # msg.package.id, ' ')
832
+ elif isinstance(msg, StartUnzipMessage):
833
+ show("Unzipping %s." % msg.package.filename, " ")
834
+
835
+ # Data directory message:
836
+ elif isinstance(msg, SelectDownloadDirMessage):
837
+ download_dir = msg.download_dir
838
+ return True
839
+
840
+ def is_stale(self, info_or_id, download_dir=None):
841
+ return self.status(info_or_id, download_dir) == self.STALE
842
+
843
+ def is_installed(self, info_or_id, download_dir=None):
844
+ return self.status(info_or_id, download_dir) == self.INSTALLED
845
+
846
+ def clear_status_cache(self, id=None):
847
+ if id is None:
848
+ self._status_cache.clear()
849
+ else:
850
+ self._status_cache.pop(id, None)
851
+
852
+ def status(self, info_or_id, download_dir=None):
853
+ """
854
+ Return a constant describing the status of the given package
855
+ or collection. Status can be one of ``INSTALLED``,
856
+ ``NOT_INSTALLED``, ``STALE``, or ``PARTIAL``.
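+
+ For example (a sketch; the result depends on the local installation)::
+
+ >>> Downloader().status('treebank') # doctest: +SKIP
+ 'installed'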
857
+ """
858
+ if download_dir is None:
859
+ download_dir = self._download_dir
860
+ info = self._info_or_id(info_or_id)
861
+
862
+ # Handle collections:
863
+ if isinstance(info, Collection):
864
+ pkg_status = [self.status(pkg.id) for pkg in info.packages]
865
+ if self.STALE in pkg_status:
866
+ return self.STALE
867
+ elif self.PARTIAL in pkg_status:
868
+ return self.PARTIAL
869
+ elif self.INSTALLED in pkg_status and self.NOT_INSTALLED in pkg_status:
870
+ return self.PARTIAL
871
+ elif self.NOT_INSTALLED in pkg_status:
872
+ return self.NOT_INSTALLED
873
+ else:
874
+ return self.INSTALLED
875
+
876
+ # Handle packages:
877
+ else:
878
+ filepath = os.path.join(download_dir, info.filename)
879
+ if download_dir != self._download_dir:
880
+ return self._pkg_status(info, filepath)
881
+ else:
882
+ if info.id not in self._status_cache:
883
+ self._status_cache[info.id] = self._pkg_status(info, filepath)
884
+ return self._status_cache[info.id]
885
+
886
+ def _pkg_status(self, info, filepath):
887
+ if not os.path.exists(filepath):
888
+ return self.NOT_INSTALLED
889
+
890
+ # Check if the file has the correct size.
891
+ try:
892
+ filestat = os.stat(filepath)
893
+ except OSError:
894
+ return self.NOT_INSTALLED
895
+ if filestat.st_size != int(info.size):
896
+ return self.STALE
897
+
898
+ # Check if the file's checksum matches
899
+ if md5_hexdigest(filepath) != info.checksum:
900
+ return self.STALE
901
+
902
+ # If it's a zipfile, and it's been at least partially
903
+ # unzipped, then check if it's been fully unzipped.
904
+ if filepath.endswith(".zip"):
905
+ unzipdir = filepath[:-4]
906
+ if not os.path.exists(unzipdir):
907
+ return self.INSTALLED # but not unzipped -- ok!
908
+ if not os.path.isdir(unzipdir):
909
+ return self.STALE
910
+
911
+ unzipped_size = sum(
912
+ os.stat(os.path.join(d, f)).st_size
913
+ for d, _, files in os.walk(unzipdir)
914
+ for f in files
915
+ )
916
+ if unzipped_size != info.unzipped_size:
917
+ return self.STALE
918
+
919
+ # Otherwise, everything looks good.
920
+ return self.INSTALLED
921
+
922
+ def update(self, quiet=False, prefix="[nltk_data] "):
923
+ """
924
+ Re-download any packages whose status is STALE.
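+
+ For example (a sketch)::
+
+ >>> Downloader().update(quiet=True) # doctest: +SKIP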
925
+ """
926
+ self.clear_status_cache()
927
+ for pkg in self.packages():
928
+ if self.status(pkg) == self.STALE:
929
+ self.download(pkg, quiet=quiet, prefix=prefix)
930
+
931
+ # /////////////////////////////////////////////////////////////////
932
+ # Index
933
+ # /////////////////////////////////////////////////////////////////
934
+
935
+ def _update_index(self, url=None):
936
+ """A helper function that ensures that self._index is
937
+ up-to-date. If the index is older than self.INDEX_TIMEOUT,
938
+ then download it again."""
939
+ # Check if the index is already up-to-date. If so, do nothing.
940
+ if not (
941
+ self._index is None
942
+ or url is not None
943
+ or time.time() - self._index_timestamp > self.INDEX_TIMEOUT
944
+ ):
945
+ return
946
+
947
+ # If a URL was specified, then update our URL.
948
+ self._url = url or self._url
949
+
950
+ # Download the index file.
951
+ self._index = nltk.internals.ElementWrapper(
952
+ ElementTree.parse(urlopen(self._url)).getroot()
953
+ )
954
+ self._index_timestamp = time.time()
955
+
956
+ # Build a dictionary of packages.
957
+ packages = [Package.fromxml(p) for p in self._index.findall("packages/package")]
958
+ self._packages = {p.id: p for p in packages}
959
+
960
+ # Build a dictionary of collections.
961
+ collections = [
962
+ Collection.fromxml(c) for c in self._index.findall("collections/collection")
963
+ ]
964
+ self._collections = {c.id: c for c in collections}
965
+
966
+ # Replace identifiers with actual children in collection.children.
967
+ for collection in self._collections.values():
968
+ # Rebuild the list rather than deleting entries while iterating,
+ # which would skip the element that follows each removal.
+ children = []
+ for child_id in collection.children:
+ if child_id in self._packages:
+ children.append(self._packages[child_id])
+ elif child_id in self._collections:
+ children.append(self._collections[child_id])
+ else:
+ print(
+ "removing collection member with no package: {}".format(child_id)
+ )
+ collection.children = children
980
+
981
+ # Fill in collection.packages for each collection.
982
+ for collection in self._collections.values():
983
+ packages = {}
984
+ queue = [collection]
985
+ for child in queue:
986
+ if isinstance(child, Collection):
987
+ queue.extend(child.children)
988
+ elif isinstance(child, Package):
989
+ packages[child.id] = child
990
+ else:
991
+ pass
992
+ collection.packages = packages.values()
993
+
994
+ # Flush the status cache
995
+ self._status_cache.clear()
996
+
997
+ def index(self):
998
+ """
999
+ Return the XML index describing the packages available from
1000
+ the data server. If necessary, this index will be downloaded
1001
+ from the data server.
1002
+ """
1003
+ self._update_index()
1004
+ return self._index
1005
+
1006
+ def info(self, id):
1007
+ """Return the ``Package`` or ``Collection`` record for the
1008
+ given item."""
1009
+ self._update_index()
1010
+ if id in self._packages:
1011
+ return self._packages[id]
1012
+ if id in self._collections:
1013
+ return self._collections[id]
1014
+ raise ValueError("Package %r not found in index" % id)
1015
+
1016
+ def xmlinfo(self, id):
1017
+ """Return the XML info record for the given item"""
1018
+ self._update_index()
1019
+ for package in self._index.findall("packages/package"):
1020
+ if package.get("id") == id:
1021
+ return package
1022
+ for collection in self._index.findall("collections/collection"):
1023
+ if collection.get("id") == id:
1024
+ return collection
1025
+ raise ValueError("Package %r not found in index" % id)
1026
+
1027
+ # /////////////////////////////////////////////////////////////////
1028
+ # URL & Data Directory
1029
+ # /////////////////////////////////////////////////////////////////
1030
+
1031
+ def _get_url(self):
1032
+ """The URL for the data server's index file."""
1033
+ return self._url
1034
+
1035
+ def _set_url(self, url):
1036
+ """
1037
+ Set a new URL for the data server. If we're unable to contact
1038
+ the given url, then the original url is kept.
1039
+ """
1040
+ original_url = self._url
1041
+ try:
1042
+ self._update_index(url)
1043
+ except:
1044
+ self._url = original_url
1045
+ raise
1046
+
1047
+ url = property(_get_url, _set_url)
1048
+
1049
+ def default_download_dir(self):
1050
+ """
1051
+ Return the directory to which packages will be downloaded by
1052
+ default. This value can be overridden using the constructor,
1053
+ or on a case-by-case basis using the ``download_dir`` argument when
1054
+ calling ``download()``.
1055
+
1056
+ On Windows, the default download directory is
1057
+ ``PYTHONHOME/lib/nltk``, where *PYTHONHOME* is the
1058
+ directory containing Python, e.g. ``C:\\Python25``.
1059
+
1060
+ On all other platforms, the default directory is the first of
1061
+ the following which exists or which can be created with write
1062
+ permission: ``/usr/share/nltk_data``, ``/usr/local/share/nltk_data``,
1063
+ ``/usr/lib/nltk_data``, ``/usr/local/lib/nltk_data``, ``~/nltk_data``.
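+
+ For example (a sketch; the exact path is machine-dependent)::
+
+ >>> Downloader().default_download_dir() # doctest: +SKIP
+ '/home/username/nltk_data'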
1064
+ """
1065
+ # Check if we are on GAE where we cannot write into filesystem.
1066
+ if "APPENGINE_RUNTIME" in os.environ:
1067
+ return
1068
+
1069
+ # Check if we have sufficient permissions to install in a
1070
+ # variety of system-wide locations.
1071
+ for nltkdir in nltk.data.path:
1072
+ if os.path.exists(nltkdir) and nltk.internals.is_writable(nltkdir):
1073
+ return nltkdir
1074
+
1075
+ # On Windows, use %APPDATA%
1076
+ if sys.platform == "win32" and "APPDATA" in os.environ:
1077
+ homedir = os.environ["APPDATA"]
1078
+
1079
+ # Otherwise, install in the user's home directory.
1080
+ else:
1081
+ homedir = os.path.expanduser("~/")
1082
+ if homedir == "~/":
1083
+ raise ValueError("Could not find a default download directory")
1084
+
1085
+ # append "nltk_data" to the home directory
1086
+ return os.path.join(homedir, "nltk_data")
1087
+
1088
+ def _get_download_dir(self):
1089
+ """
1090
+ The default directory to which packages will be downloaded.
1091
+ This defaults to the value returned by ``default_download_dir()``.
1092
+ To override this default on a case-by-case basis, use the
1093
+ ``download_dir`` argument when calling ``download()``.
1094
+ """
1095
+ return self._download_dir
1096
+
1097
+ def _set_download_dir(self, download_dir):
1098
+ self._download_dir = download_dir
1099
+ # Clear the status cache.
1100
+ self._status_cache.clear()
1101
+
1102
+ download_dir = property(_get_download_dir, _set_download_dir)
1103
+
1104
+ # /////////////////////////////////////////////////////////////////
1105
+ # Interactive Shell
1106
+ # /////////////////////////////////////////////////////////////////
1107
+
1108
+ def _interactive_download(self):
1109
+ # Try the GUI first; if that doesn't work, try the simple
1110
+ # interactive shell.
1111
+ if TKINTER:
1112
+ try:
1113
+ DownloaderGUI(self).mainloop()
1114
+ except TclError:
1115
+ DownloaderShell(self).run()
1116
+ else:
1117
+ DownloaderShell(self).run()
1118
+
1119
+
1120
+ class DownloaderShell:
1121
+ def __init__(self, dataserver):
1122
+ self._ds = dataserver
1123
+
1124
+ def _simple_interactive_menu(self, *options):
1125
+ print("-" * 75)
1126
+ spc = (68 - sum(len(o) for o in options)) // (len(options) - 1) * " "
1127
+ print(" " + spc.join(options))
1128
+ print("-" * 75)
1129
+
1130
+ def run(self):
1131
+ print("NLTK Downloader")
1132
+ while True:
1133
+ self._simple_interactive_menu(
1134
+ "d) Download",
1135
+ "l) List",
1136
+ " u) Update",
1137
+ "c) Config",
1138
+ "h) Help",
1139
+ "q) Quit",
1140
+ )
1141
+ user_input = input("Downloader> ").strip()
1142
+ if not user_input:
1143
+ print()
1144
+ continue
1145
+ command = user_input.lower().split()[0]
1146
+ args = user_input.split()[1:]
1147
+ try:
1148
+ if command == "l":
1149
+ print()
1150
+ self._ds.list(self._ds.download_dir, header=False, more_prompt=True)
1151
+ elif command == "h":
1152
+ self._simple_interactive_help()
1153
+ elif command == "c":
1154
+ self._simple_interactive_config()
1155
+ elif command in ("q", "x"):
1156
+ return
1157
+ elif command == "d":
1158
+ self._simple_interactive_download(args)
1159
+ elif command == "u":
1160
+ self._simple_interactive_update()
1161
+ else:
1162
+ print("Command %r unrecognized" % user_input)
1163
+ except HTTPError as e:
1164
+ print("Error reading from server: %s" % e)
1165
+ except URLError as e:
1166
+ print("Error connecting to server: %s" % e.reason)
1167
+ # try checking if user_input is a package name, &
1168
+ # downloading it?
1169
+ print()
1170
+
1171
+ def _simple_interactive_download(self, args):
1172
+ if args:
1173
+ for arg in args:
1174
+ try:
1175
+ self._ds.download(arg, prefix=" ")
1176
+ except (OSError, ValueError) as e:
1177
+ print(e)
1178
+ else:
1179
+ while True:
1180
+ print()
1181
+ print("Download which package (l=list; x=cancel)?")
1182
+ user_input = input(" Identifier> ")
1183
+ if user_input.lower() == "l":
1184
+ self._ds.list(
1185
+ self._ds.download_dir,
1186
+ header=False,
1187
+ more_prompt=True,
1188
+ skip_installed=True,
1189
+ )
1190
+ continue
1191
+ elif user_input.lower() in ("x", "q", ""):
1192
+ return
1193
+ elif user_input:
1194
+ for id in user_input.split():
1195
+ try:
1196
+ self._ds.download(id, prefix=" ")
1197
+ except (OSError, ValueError) as e:
1198
+ print(e)
1199
+ break
1200
+
1201
+ def _simple_interactive_update(self):
1202
+ while True:
1203
+ stale_packages = []
1204
+ stale = partial = False
1205
+ for info in sorted(getattr(self._ds, "packages")(), key=str):
1206
+ if self._ds.status(info) == self._ds.STALE:
1207
+ stale_packages.append((info.id, info.name))
1208
+
1209
+ print()
1210
+ if stale_packages:
1211
+ print("Will update following packages (o=ok; x=cancel)")
1212
+ for pid, pname in stale_packages:
1213
+ name = textwrap.fill(
1214
+ "-" * 27 + (pname), 75, subsequent_indent=27 * " "
1215
+ )[27:]
1216
+ print(" [ ] {} {}".format(pid.ljust(20, "."), name))
1217
+ print()
1218
+
1219
+ user_input = input(" Identifier> ")
1220
+ if user_input.lower() == "o":
1221
+ for pid, pname in stale_packages:
1222
+ try:
1223
+ self._ds.download(pid, prefix=" ")
1224
+ except (OSError, ValueError) as e:
1225
+ print(e)
1226
+ break
1227
+ elif user_input.lower() in ("x", "q", ""):
1228
+ return
1229
+ else:
1230
+ print("Nothing to update.")
1231
+ return
1232
+
1233
+ def _simple_interactive_help(self):
1234
+ print()
1235
+ print("Commands:")
1236
+ print(
1237
+ " d) Download a package or collection u) Update out of date packages"
1238
+ )
1239
+ print(" l) List packages & collections h) Help")
1240
+ print(" c) View & Modify Configuration q) Quit")
1241
+
1242
+ def _show_config(self):
1243
+ print()
1244
+ print("Data Server:")
1245
+ print(" - URL: <%s>" % self._ds.url)
1246
+ print(" - %d Package Collections Available" % len(self._ds.collections()))
1247
+ print(" - %d Individual Packages Available" % len(self._ds.packages()))
1248
+ print()
1249
+ print("Local Machine:")
1250
+ print(" - Data directory: %s" % self._ds.download_dir)
1251
+
1252
+ def _simple_interactive_config(self):
1253
+ self._show_config()
1254
+ while True:
1255
+ print()
1256
+ self._simple_interactive_menu(
1257
+ "s) Show Config", "u) Set Server URL", "d) Set Data Dir", "m) Main Menu"
1258
+ )
1259
+ user_input = input("Config> ").strip().lower()
1260
+ if user_input == "s":
1261
+ self._show_config()
1262
+ elif user_input == "d":
1263
+ new_dl_dir = input(" New Directory> ").strip()
1264
+ if new_dl_dir in ("", "x", "q", "X", "Q"):
1265
+ print(" Cancelled!")
1266
+ elif os.path.isdir(new_dl_dir):
1267
+ self._ds.download_dir = new_dl_dir
1268
+ else:
1269
+ print("Directory %r not found! Create it first." % new_dl_dir)
1270
+ elif user_input == "u":
1271
+ new_url = input(" New URL> ").strip()
1272
+ if new_url in ("", "x", "q", "X", "Q"):
1273
+ print(" Cancelled!")
1274
+ else:
1275
+ if not new_url.startswith(("http://", "https://")):
1276
+ new_url = "http://" + new_url
1277
+ try:
1278
+ self._ds.url = new_url
1279
+ except Exception as e:
1280
+ print(f"Error reading <{new_url!r}>:\n {e}")
1281
+ elif user_input == "m":
1282
+ break
1283
+
1284
+
1285
+ class DownloaderGUI:
1286
+ """
1287
+ Graphical interface for downloading packages from the NLTK data
1288
+ server.
1289
+ """
1290
+
1291
+ # /////////////////////////////////////////////////////////////////
1292
+ # Column Configuration
1293
+ # /////////////////////////////////////////////////////////////////
1294
+
1295
+ COLUMNS = [
1296
+ "",
1297
+ "Identifier",
1298
+ "Name",
1299
+ "Size",
1300
+ "Status",
1301
+ "Unzipped Size",
1302
+ "Copyright",
1303
+ "Contact",
1304
+ "License",
1305
+ "Author",
1306
+ "Subdir",
1307
+ "Checksum",
1308
+ ]
1309
+ """A list of the names of columns. This controls the order in
1310
+ which the columns will appear. If this is edited, then
1311
+ ``_package_to_columns()`` may need to be edited to match."""
1312
+
1313
+ COLUMN_WEIGHTS = {"": 0, "Name": 5, "Size": 0, "Status": 0}
1314
+ """A dictionary specifying how columns should be resized when the
1315
+ table is resized. Columns with weight 0 will not be resized at
1316
+ all; and columns with high weight will be resized more.
1317
+ Default weight (for columns not explicitly listed) is 1."""
1318
+
1319
+ COLUMN_WIDTHS = {
1320
+ "": 1,
1321
+ "Identifier": 20,
1322
+ "Name": 45,
1323
+ "Size": 10,
1324
+ "Unzipped Size": 10,
1325
+ "Status": 12,
1326
+ }
1327
+ """A dictionary specifying how wide each column should be, in
1328
+ characters. The default width (for columns not explicitly
1329
+ listed) is specified by ``DEFAULT_COLUMN_WIDTH``."""
1330
+
1331
+ DEFAULT_COLUMN_WIDTH = 30
1332
+ """The default width for columns that are not explicitly listed
1333
+ in ``COLUMN_WIDTHS``."""
1334
+
1335
+ INITIAL_COLUMNS = ["", "Identifier", "Name", "Size", "Status"]
1336
+ """The set of columns that should be displayed by default."""
1337
+
1338
+ # Perform a few import-time sanity checks to make sure that the
1339
+ # column configuration variables are defined consistently:
1340
+ for c in COLUMN_WEIGHTS:
1341
+ assert c in COLUMNS
1342
+ for c in COLUMN_WIDTHS:
1343
+ assert c in COLUMNS
1344
+ for c in INITIAL_COLUMNS:
1345
+ assert c in COLUMNS
1346
+
1347
+ # /////////////////////////////////////////////////////////////////
1348
+ # Color Configuration
1349
+ # /////////////////////////////////////////////////////////////////
1350
+
1351
+ _BACKDROP_COLOR = ("#000", "#ccc")
1352
+
1353
+ _ROW_COLOR = {
1354
+ Downloader.INSTALLED: ("#afa", "#080"),
1355
+ Downloader.PARTIAL: ("#ffa", "#880"),
1356
+ Downloader.STALE: ("#faa", "#800"),
1357
+ Downloader.NOT_INSTALLED: ("#fff", "#888"),
1358
+ }
1359
+
1360
+ _MARK_COLOR = ("#000", "#ccc")
1361
+
1362
+ # _FRONT_TAB_COLOR = ('#ccf', '#008')
1363
+ # _BACK_TAB_COLOR = ('#88a', '#448')
1364
+ _FRONT_TAB_COLOR = ("#fff", "#45c")
1365
+ _BACK_TAB_COLOR = ("#aaa", "#67a")
1366
+
1367
+ _PROGRESS_COLOR = ("#f00", "#aaa")
1368
+
1369
+ _TAB_FONT = "helvetica -16 bold"
1370
+
1371
+ # /////////////////////////////////////////////////////////////////
1372
+ # Constructor
1373
+ # /////////////////////////////////////////////////////////////////
1374
+
1375
+ def __init__(self, dataserver, use_threads=True):
1376
+ self._ds = dataserver
1377
+ self._use_threads = use_threads
1378
+
1379
+ # For the threaded downloader:
1380
+ self._download_lock = threading.Lock()
1381
+ self._download_msg_queue = []
1382
+ self._download_abort_queue = []
1383
+ self._downloading = False
1384
+
1385
+ # For tkinter after callbacks:
1386
+ self._afterid = {}
1387
+
1388
+ # A message log.
1389
+ self._log_messages = []
1390
+ self._log_indent = 0
1391
+ self._log("NLTK Downloader Started!")
1392
+
1393
+ # Create the main window.
1394
+ top = self.top = Tk()
1395
+ top.geometry("+50+50")
1396
+ top.title("NLTK Downloader")
1397
+ top.configure(background=self._BACKDROP_COLOR[1])
1398
+
1399
+ # Set up some bindings now, in case anything goes wrong.
1400
+ top.bind("<Control-q>", self.destroy)
1401
+ top.bind("<Control-x>", self.destroy)
1402
+ self._destroyed = False
1403
+
1404
+ self._column_vars = {}
1405
+
1406
+ # Initialize the GUI.
1407
+ self._init_widgets()
1408
+ self._init_menu()
1409
+ try:
1410
+ self._fill_table()
1411
+ except HTTPError as e:
1412
+ showerror("Error reading from server", e)
1413
+ except URLError as e:
1414
+ showerror("Error connecting to server", e.reason)
1415
+
1416
+ self._show_info()
1417
+ self._select_columns()
1418
+ self._table.select(0)
1419
+
1420
+ # Make sure we get notified when we're destroyed, so we can
1421
+ # cancel any download in progress.
1422
+ self._table.bind("<Destroy>", self._destroy)
1423
+
1424
+ def _log(self, msg):
1425
+ self._log_messages.append(
1426
+ "{} {}{}".format(time.ctime(), " | " * self._log_indent, msg)
1427
+ )
1428
+
1429
+ # /////////////////////////////////////////////////////////////////
1430
+ # Internals
1431
+ # /////////////////////////////////////////////////////////////////
1432
+
1433
+ def _init_widgets(self):
1434
+ # Create the top-level frame structures
1435
+ f1 = Frame(self.top, relief="raised", border=2, padx=8, pady=0)
1436
+ f1.pack(side="top", expand=True, fill="both")
1437
+ f1.grid_rowconfigure(2, weight=1)
1438
+ f1.grid_columnconfigure(0, weight=1)
1439
+ Frame(f1, height=8).grid(column=0, row=0) # spacer
1440
+ tabframe = Frame(f1)
1441
+ tabframe.grid(column=0, row=1, sticky="news")
1442
+ tableframe = Frame(f1)
1443
+ tableframe.grid(column=0, row=2, sticky="news")
1444
+ buttonframe = Frame(f1)
1445
+ buttonframe.grid(column=0, row=3, sticky="news")
1446
+ Frame(f1, height=8).grid(column=0, row=4) # spacer
1447
+ infoframe = Frame(f1)
1448
+ infoframe.grid(column=0, row=5, sticky="news")
1449
+ Frame(f1, height=8).grid(column=0, row=6) # spacer
1450
+ progressframe = Frame(
1451
+ self.top, padx=3, pady=3, background=self._BACKDROP_COLOR[1]
1452
+ )
1453
+ progressframe.pack(side="bottom", fill="x")
1454
+ self.top["border"] = 0
1455
+ self.top["highlightthickness"] = 0
1456
+
1457
+ # Create the tabs
1458
+ self._tab_names = ["Collections", "Corpora", "Models", "All Packages"]
1459
+ self._tabs = {}
1460
+ for i, tab in enumerate(self._tab_names):
1461
+ label = Label(tabframe, text=tab, font=self._TAB_FONT)
1462
+ label.pack(side="left", padx=((i + 1) % 2) * 10)
1463
+ label.bind("<Button-1>", self._select_tab)
1464
+ self._tabs[tab.lower()] = label
1465
+
1466
+ # Create the table.
1467
+ column_weights = [self.COLUMN_WEIGHTS.get(column, 1) for column in self.COLUMNS]
1468
+ self._table = Table(
1469
+ tableframe,
1470
+ self.COLUMNS,
1471
+ column_weights=column_weights,
1472
+ highlightthickness=0,
1473
+ listbox_height=16,
1474
+ reprfunc=self._table_reprfunc,
1475
+ )
1476
+ self._table.columnconfig(0, foreground=self._MARK_COLOR[0]) # marked
1477
+ for i, column in enumerate(self.COLUMNS):
1478
+ width = self.COLUMN_WIDTHS.get(column, self.DEFAULT_COLUMN_WIDTH)
1479
+ self._table.columnconfig(i, width=width)
1480
+ self._table.pack(expand=True, fill="both")
1481
+ self._table.focus()
1482
+ self._table.bind_to_listboxes("<Double-Button-1>", self._download)
1483
+ self._table.bind("<space>", self._table_mark)
1484
+ self._table.bind("<Return>", self._download)
1485
+ self._table.bind("<Left>", self._prev_tab)
1486
+ self._table.bind("<Right>", self._next_tab)
1487
+ self._table.bind("<Control-a>", self._mark_all)
1488
+
1489
+ # Create entry boxes for URL & download_dir
1490
+ infoframe.grid_columnconfigure(1, weight=1)
1491
+
1492
+ info = [
1493
+ ("url", "Server Index:", self._set_url),
1494
+ ("download_dir", "Download Directory:", self._set_download_dir),
1495
+ ]
1496
+ self._info = {}
1497
+ for (i, (key, label, callback)) in enumerate(info):
1498
+ Label(infoframe, text=label).grid(column=0, row=i, sticky="e")
1499
+ entry = Entry(
1500
+ infoframe,
1501
+ font="courier",
1502
+ relief="groove",
1503
+ disabledforeground="#007aff",
1504
+ foreground="#007aff",
1505
+ )
1506
+ self._info[key] = (entry, callback)
1507
+ entry.bind("<Return>", self._info_save)
1508
+ entry.bind("<Button-1>", lambda e, key=key: self._info_edit(key))
1509
+ entry.grid(column=1, row=i, sticky="ew")
1510
+
1511
+ # If the user edits url or download_dir, and then clicks outside
1512
+ # the entry box, then save their results.
1513
+ self.top.bind("<Button-1>", self._info_save)
1514
+
1515
+ # Create Download & Refresh buttons.
1516
+ self._download_button = Button(
1517
+ buttonframe, text="Download", command=self._download, width=8
1518
+ )
1519
+ self._download_button.pack(side="left")
1520
+ self._refresh_button = Button(
1521
+ buttonframe, text="Refresh", command=self._refresh, width=8
1522
+ )
1523
+ self._refresh_button.pack(side="right")
1524
+
1525
+ # Create Progress bar
1526
+ self._progresslabel = Label(
1527
+ progressframe,
1528
+ text="",
1529
+ foreground=self._BACKDROP_COLOR[0],
1530
+ background=self._BACKDROP_COLOR[1],
1531
+ )
1532
+ self._progressbar = Canvas(
1533
+ progressframe,
1534
+ width=200,
1535
+ height=16,
1536
+ background=self._PROGRESS_COLOR[1],
1537
+ relief="sunken",
1538
+ border=1,
1539
+ )
1540
+ self._init_progressbar()
1541
+ self._progressbar.pack(side="right")
1542
+ self._progresslabel.pack(side="left")
1543
+
1544
+ def _init_menu(self):
1545
+ menubar = Menu(self.top)
1546
+
1547
+ filemenu = Menu(menubar, tearoff=0)
1548
+ filemenu.add_command(
1549
+ label="Download", underline=0, command=self._download, accelerator="Return"
1550
+ )
1551
+ filemenu.add_separator()
1552
+ filemenu.add_command(
1553
+ label="Change Server Index",
1554
+ underline=7,
1555
+ command=lambda: self._info_edit("url"),
1556
+ )
1557
+ filemenu.add_command(
1558
+ label="Change Download Directory",
1559
+ underline=0,
1560
+ command=lambda: self._info_edit("download_dir"),
1561
+ )
1562
+ filemenu.add_separator()
1563
+ filemenu.add_command(label="Show Log", underline=5, command=self._show_log)
1564
+ filemenu.add_separator()
1565
+ filemenu.add_command(
1566
+ label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x"
1567
+ )
1568
+ menubar.add_cascade(label="File", underline=0, menu=filemenu)
1569
+
1570
+ # Create a menu to control which columns of the table are
1571
+ # shown. n.b.: we never hide the first two columns (mark and
1572
+ # identifier).
1573
+ viewmenu = Menu(menubar, tearoff=0)
1574
+ for column in self._table.column_names[2:]:
1575
+ var = IntVar(self.top)
1576
+ assert column not in self._column_vars
1577
+ self._column_vars[column] = var
1578
+ if column in self.INITIAL_COLUMNS:
1579
+ var.set(1)
1580
+ viewmenu.add_checkbutton(
1581
+ label=column, underline=0, variable=var, command=self._select_columns
1582
+ )
1583
+ menubar.add_cascade(label="View", underline=0, menu=viewmenu)
1584
+
1585
+ # Create a sort menu
1586
+ # [xx] this should be selectbuttons; and it should include
1587
+ # reversed sorts as options.
1588
+ sortmenu = Menu(menubar, tearoff=0)
1589
+ for column in self._table.column_names[1:]:
1590
+ sortmenu.add_command(
1591
+ label="Sort by %s" % column,
1592
+ command=(lambda c=column: self._table.sort_by(c, "ascending")),
1593
+ )
1594
+ sortmenu.add_separator()
1595
+ # sortmenu.add_command(label='Descending Sort:')
1596
+ for column in self._table.column_names[1:]:
1597
+ sortmenu.add_command(
1598
+ label="Reverse sort by %s" % column,
1599
+ command=(lambda c=column: self._table.sort_by(c, "descending")),
1600
+ )
1601
+ menubar.add_cascade(label="Sort", underline=0, menu=sortmenu)
1602
+
1603
+ helpmenu = Menu(menubar, tearoff=0)
1604
+ helpmenu.add_command(label="About", underline=0, command=self.about)
1605
+ helpmenu.add_command(
1606
+ label="Instructions", underline=0, command=self.help, accelerator="F1"
1607
+ )
1608
+ menubar.add_cascade(label="Help", underline=0, menu=helpmenu)
1609
+ self.top.bind("<F1>", self.help)
1610
+
1611
+ self.top.config(menu=menubar)
1612
+
1613
+ def _select_columns(self):
1614
+ for (column, var) in self._column_vars.items():
1615
+ if var.get():
1616
+ self._table.show_column(column)
1617
+ else:
1618
+ self._table.hide_column(column)
1619
+
1620
+ def _refresh(self):
1621
+ self._ds.clear_status_cache()
1622
+ try:
1623
+ self._fill_table()
1624
+ except HTTPError as e:
1625
+ showerror("Error reading from server", e)
1626
+ except URLError as e:
1627
+ showerror("Error connecting to server", e.reason)
1628
+ self._table.select(0)
1629
+
1630
+ def _info_edit(self, info_key):
1631
+ self._info_save() # just in case.
1632
+ (entry, callback) = self._info[info_key]
1633
+ entry["state"] = "normal"
1634
+ entry["relief"] = "sunken"
1635
+ entry.focus()
1636
+
1637
+ def _info_save(self, e=None):
1638
+ focus = self._table
1639
+ for entry, callback in self._info.values():
1640
+ if entry["state"] == "disabled":
1641
+ continue
1642
+ if e is not None and e.widget is entry and e.keysym != "Return":
1643
+ focus = entry
1644
+ else:
1645
+ entry["state"] = "disabled"
1646
+ entry["relief"] = "groove"
1647
+ callback(entry.get())
1648
+ focus.focus()
1649
+
1650
+ def _table_reprfunc(self, row, col, val):
1651
+ if self._table.column_names[col].endswith("Size"):
1652
+ if isinstance(val, str):
1653
+ return " %s" % val
1654
+ elif val < 1024**2:
1655
+ return " %.1f KB" % (val / 1024.0**1)
1656
+ elif val < 1024**3:
1657
+ return " %.1f MB" % (val / 1024.0**2)
1658
+ else:
1659
+ return " %.1f GB" % (val / 1024.0**3)
1660
+
1661
+ if col in (0, ""):
1662
+ return str(val)
1663
+ else:
1664
+ return " %s" % val
1665
+
1666
+ def _set_url(self, url):
1667
+ if url == self._ds.url:
1668
+ return
1669
+ try:
1670
+ self._ds.url = url
1671
+ self._fill_table()
1672
+ except OSError as e:
1673
+ showerror("Error Setting Server Index", str(e))
1674
+ self._show_info()
1675
+
1676
+ def _set_download_dir(self, download_dir):
1677
+ if self._ds.download_dir == download_dir:
1678
+ return
1679
+ # check if the dir exists, and if not, ask if we should create it?
1680
+
1681
+ # Clear our status cache, & re-check what's installed
1682
+ self._ds.download_dir = download_dir
1683
+ try:
1684
+ self._fill_table()
1685
+ except HTTPError as e:
1686
+ showerror("Error reading from server", e)
1687
+ except URLError as e:
1688
+ showerror("Error connecting to server", e.reason)
1689
+ self._show_info()
1690
+
1691
+ def _show_info(self):
1692
+ print("showing info", self._ds.url)
1693
+ for entry, cb in self._info.values():
1694
+ entry["state"] = "normal"
1695
+ entry.delete(0, "end")
1696
+ self._info["url"][0].insert(0, self._ds.url)
1697
+ self._info["download_dir"][0].insert(0, self._ds.download_dir)
1698
+ for entry, cb in self._info.values():
1699
+ entry["state"] = "disabled"
1700
+
1701
+ def _prev_tab(self, *e):
1702
+ for i, tab in enumerate(self._tab_names):
1703
+ if tab.lower() == self._tab and i > 0:
1704
+ self._tab = self._tab_names[i - 1].lower()
1705
+ try:
1706
+ return self._fill_table()
1707
+ except HTTPError as e:
1708
+ showerror("Error reading from server", e)
1709
+ except URLError as e:
1710
+ showerror("Error connecting to server", e.reason)
1711
+
1712
+ def _next_tab(self, *e):
1713
+ for i, tab in enumerate(self._tab_names):
1714
+ if tab.lower() == self._tab and i < (len(self._tabs) - 1):
1715
+ self._tab = self._tab_names[i + 1].lower()
1716
+ try:
1717
+ return self._fill_table()
1718
+ except HTTPError as e:
1719
+ showerror("Error reading from server", e)
1720
+ except URLError as e:
1721
+ showerror("Error connecting to server", e.reason)
1722
+
1723
+ def _select_tab(self, event):
1724
+ self._tab = event.widget["text"].lower()
1725
+ try:
1726
+ self._fill_table()
1727
+ except HTTPError as e:
1728
+ showerror("Error reading from server", e)
1729
+ except URLError as e:
1730
+ showerror("Error connecting to server", e.reason)
1731
+
1732
+ _tab = "collections"
1733
+ # _tab = 'corpora'
1734
+ _rows = None
1735
+
1736
+ def _fill_table(self):
1737
+ selected_row = self._table.selected_row()
1738
+ self._table.clear()
1739
+ if self._tab == "all packages":
1740
+ items = self._ds.packages()
1741
+ elif self._tab == "corpora":
1742
+ items = self._ds.corpora()
1743
+ elif self._tab == "models":
1744
+ items = self._ds.models()
1745
+ elif self._tab == "collections":
1746
+ items = self._ds.collections()
1747
+ else:
1748
+ assert 0, "bad tab value %r" % self._tab
1749
+ rows = [self._package_to_columns(item) for item in items]
1750
+ self._table.extend(rows)
1751
+
1752
+ # Highlight the active tab.
1753
+ for tab, label in self._tabs.items():
1754
+ if tab == self._tab:
1755
+ label.configure(
1756
+ foreground=self._FRONT_TAB_COLOR[0],
1757
+ background=self._FRONT_TAB_COLOR[1],
1758
+ )
1759
+ else:
1760
+ label.configure(
1761
+ foreground=self._BACK_TAB_COLOR[0],
1762
+ background=self._BACK_TAB_COLOR[1],
1763
+ )
1764
+
1765
+ self._table.sort_by("Identifier", order="ascending")
1766
+ self._color_table()
1767
+ self._table.select(selected_row)
1768
+
1769
+ # This is a hack, because the scrollbar isn't updating its
1770
+ # position right -- I'm not sure what the underlying cause is
1771
+ # though. (This is on OS X w/ python 2.5) The length of
1772
+ # delay that's necessary seems to depend on how fast the
1773
+ # computer is. :-/
1774
+ self.top.after(150, self._table._scrollbar.set, *self._table._mlb.yview())
1775
+ self.top.after(300, self._table._scrollbar.set, *self._table._mlb.yview())
1776
+
1777
+ def _update_table_status(self):
1778
+ for row_num in range(len(self._table)):
1779
+ status = self._ds.status(self._table[row_num, "Identifier"])
1780
+ self._table[row_num, "Status"] = status
1781
+ self._color_table()
1782
+
1783
+ def _download(self, *e):
1784
+ # If we're using threads, then delegate to the threaded
1785
+ # downloader instead.
1786
+ if self._use_threads:
1787
+ return self._download_threaded(*e)
1788
+
1789
+ marked = [
1790
+ self._table[row, "Identifier"]
1791
+ for row in range(len(self._table))
1792
+ if self._table[row, 0] != ""
1793
+ ]
1794
+ selection = self._table.selected_row()
1795
+ if not marked and selection is not None:
1796
+ marked = [self._table[selection, "Identifier"]]
1797
+
1798
+ download_iter = self._ds.incr_download(marked, self._ds.download_dir)
1799
+ self._log_indent = 0
1800
+ self._download_cb(download_iter, marked)
1801
+
1802
+ _DL_DELAY = 10
1803
+
1804
+ def _download_cb(self, download_iter, ids):
1805
+ try:
1806
+ msg = next(download_iter)
1807
+ except StopIteration:
1808
+ # self._fill_table(sort=False)
1809
+ self._update_table_status()
1810
+ afterid = self.top.after(10, self._show_progress, 0)
1811
+ self._afterid["_download_cb"] = afterid
1812
+ return
1813
+
1814
+ def show(s):
1815
+ self._progresslabel["text"] = s
1816
+ self._log(s)
1817
+
1818
+ if isinstance(msg, ProgressMessage):
1819
+ self._show_progress(msg.progress)
1820
+ elif isinstance(msg, ErrorMessage):
1821
+ show(msg.message)
1822
+ if msg.package is not None:
1823
+ self._select(msg.package.id)
1824
+ self._show_progress(None)
1825
+ return # halt progress.
1826
+ elif isinstance(msg, StartCollectionMessage):
1827
+ show("Downloading collection %s" % msg.collection.id)
1828
+ self._log_indent += 1
1829
+ elif isinstance(msg, StartPackageMessage):
1830
+ show("Downloading package %s" % msg.package.id)
1831
+ elif isinstance(msg, UpToDateMessage):
1832
+ show("Package %s is up-to-date!" % msg.package.id)
1833
+ # elif isinstance(msg, StaleMessage):
1834
+ # show('Package %s is out-of-date or corrupt' % msg.package.id)
1835
+ elif isinstance(msg, FinishDownloadMessage):
1836
+ show("Finished downloading %r." % msg.package.id)
1837
+ elif isinstance(msg, StartUnzipMessage):
1838
+ show("Unzipping %s" % msg.package.filename)
1839
+ elif isinstance(msg, FinishCollectionMessage):
1840
+ self._log_indent -= 1
1841
+ show("Finished downloading collection %r." % msg.collection.id)
1842
+ self._clear_mark(msg.collection.id)
1843
+ elif isinstance(msg, FinishPackageMessage):
1844
+ self._clear_mark(msg.package.id)
1845
+ afterid = self.top.after(self._DL_DELAY, self._download_cb, download_iter, ids)
1846
+ self._afterid["_download_cb"] = afterid
1847
+
1848
+ def _select(self, id):
1849
+ for row in range(len(self._table)):
1850
+ if self._table[row, "Identifier"] == id:
1851
+ self._table.select(row)
1852
+ return
1853
+
1854
+ def _color_table(self):
1855
+ # Color rows according to status.
1856
+ for row in range(len(self._table)):
1857
+ bg, sbg = self._ROW_COLOR[self._table[row, "Status"]]
1858
+ fg, sfg = ("black", "white")
1859
+ self._table.rowconfig(
1860
+ row,
1861
+ foreground=fg,
1862
+ selectforeground=sfg,
1863
+ background=bg,
1864
+ selectbackground=sbg,
1865
+ )
1866
+ # Color the marked column
1867
+ self._table.itemconfigure(
1868
+ row, 0, foreground=self._MARK_COLOR[0], background=self._MARK_COLOR[1]
1869
+ )
1870
+
1871
+ def _clear_mark(self, id):
1872
+ for row in range(len(self._table)):
1873
+ if self._table[row, "Identifier"] == id:
1874
+ self._table[row, 0] = ""
1875
+
1876
+ def _mark_all(self, *e):
1877
+ for row in range(len(self._table)):
1878
+ self._table[row, 0] = "X"
1879
+
1880
+ def _table_mark(self, *e):
1881
+ selection = self._table.selected_row()
1882
+ if selection >= 0:
1883
+ if self._table[selection][0] != "":
1884
+ self._table[selection, 0] = ""
1885
+ else:
1886
+ self._table[selection, 0] = "X"
1887
+ self._table.select(delta=1)
1888
+
1889
+ def _show_log(self):
1890
+ text = "\n".join(self._log_messages)
1891
+ ShowText(self.top, "NLTK Downloader Log", text)
1892
+
1893
+ def _package_to_columns(self, pkg):
1894
+ """
1895
+ Given a package, return a list of values describing that
1896
+ package, one for each column in ``self.COLUMNS``.
1897
+ """
1898
+ row = []
1899
+ for column_index, column_name in enumerate(self.COLUMNS):
1900
+ if column_index == 0: # Mark:
1901
+ row.append("")
1902
+ elif column_name == "Identifier":
1903
+ row.append(pkg.id)
1904
+ elif column_name == "Status":
1905
+ row.append(self._ds.status(pkg))
1906
+ else:
1907
+ attr = column_name.lower().replace(" ", "_")
1908
+ row.append(getattr(pkg, attr, "n/a"))
1909
+ return row
1910
+
1911
+ # /////////////////////////////////////////////////////////////////
1912
+ # External Interface
1913
+ # /////////////////////////////////////////////////////////////////
1914
+
1915
+ def destroy(self, *e):
1916
+ if self._destroyed:
1917
+ return
1918
+ self.top.destroy()
1919
+ self._destroyed = True
1920
+
1921
+ def _destroy(self, *e):
1922
+ if self.top is not None:
1923
+ for afterid in self._afterid.values():
1924
+ self.top.after_cancel(afterid)
1925
+
1926
+ # Abort any download in progress.
1927
+ if self._downloading and self._use_threads:
1928
+ self._abort_download()
1929
+
1930
+ # Make sure the garbage collector destroys these now;
1931
+ # otherwise, they may get destroyed when we're not in the main
1932
+ # thread, which would make Tkinter unhappy.
1933
+ self._column_vars.clear()
1934
+
1935
+ def mainloop(self, *args, **kwargs):
1936
+ self.top.mainloop(*args, **kwargs)
1937
+
1938
+ # /////////////////////////////////////////////////////////////////
1939
+ # HELP
1940
+ # /////////////////////////////////////////////////////////////////
1941
+
1942
+ HELP = textwrap.dedent(
1943
+ """\
1944
+ This tool can be used to download a variety of corpora and models
1945
+ that can be used with NLTK. Each corpus or model is distributed
1946
+ in a single zip file, known as a \"package file.\" You can
1947
+ download packages individually, or you can download pre-defined
1948
+ collections of packages.
1949
+
1950
+ When you download a package, it will be saved to the \"download
1951
+ directory.\" A default download directory is chosen when you run
1952
+ the downloader; but you may also select a different download
+ directory. On Windows, the default download directory is
+ \"package.\"
1958
+
1959
+ The NLTK downloader can be used to download a variety of corpora,
1960
+ models, and other data packages.
1961
+
1962
+ Keyboard shortcuts::
1963
+ [return]\t Download
1964
+ [up]\t Select previous package
1965
+ [down]\t Select next package
1966
+ [left]\t Select previous tab
1967
+ [right]\t Select next tab
1968
+ """
1969
+ )
1970
+
1971
+ def help(self, *e):
1972
+ # The default font's not very legible; try using 'fixed' instead.
1973
+ try:
1974
+ ShowText(
1975
+ self.top,
1976
+ "Help: NLTK Downloader",
1977
+ self.HELP.strip(),
1978
+ width=75,
1979
+ font="fixed",
1980
+ )
1981
+ except Exception:
1982
+ ShowText(self.top, "Help: NLTK Downloader", self.HELP.strip(), width=75)
1983
+
1984
+ def about(self, *e):
1985
+ ABOUT = "NLTK Downloader\n" + "Written by Edward Loper"
1986
+ TITLE = "About: NLTK Downloader"
1987
+ try:
1988
+ from tkinter.messagebox import Message
1989
+
1990
+ Message(message=ABOUT, title=TITLE).show()
1991
+ except ImportError:
1992
+ ShowText(self.top, TITLE, ABOUT)
1993
+
1994
+ # /////////////////////////////////////////////////////////////////
1995
+ # Progress Bar
1996
+ # /////////////////////////////////////////////////////////////////
1997
+
1998
+ _gradient_width = 5
1999
+
2000
+ def _init_progressbar(self):
2001
+ c = self._progressbar
2002
+ width, height = int(c["width"]), int(c["height"])
2003
+ for i in range(0, (int(c["width"]) * 2) // self._gradient_width):
2004
+ c.create_line(
2005
+ i * self._gradient_width + 20,
2006
+ -20,
2007
+ i * self._gradient_width - height - 20,
2008
+ height + 20,
2009
+ width=self._gradient_width,
2010
+ fill="#%02x0000" % (80 + abs(i % 6 - 3) * 12),
2011
+ )
2012
+ c.addtag_all("gradient")
2013
+ c.itemconfig("gradient", state="hidden")
2014
+
2015
+ # This is used to display progress
2016
+ c.addtag_withtag(
2017
+ "redbox", c.create_rectangle(0, 0, 0, 0, fill=self._PROGRESS_COLOR[0])
2018
+ )
2019
+
2020
+ def _show_progress(self, percent):
2021
+ c = self._progressbar
2022
+ if percent is None:
2023
+ c.coords("redbox", 0, 0, 0, 0)
2024
+ c.itemconfig("gradient", state="hidden")
2025
+ else:
2026
+ width, height = int(c["width"]), int(c["height"])
2027
+ x = percent * int(width) // 100 + 1
2028
+ c.coords("redbox", 0, 0, x, height + 1)
2029
+
2030
+ def _progress_alive(self):
2031
+ c = self._progressbar
2032
+ if not self._downloading:
2033
+ c.itemconfig("gradient", state="hidden")
2034
+ else:
2035
+ c.itemconfig("gradient", state="normal")
2036
+ x1, y1, x2, y2 = c.bbox("gradient")
2037
+ if x1 <= -100:
2038
+ c.move("gradient", (self._gradient_width * 6) - 4, 0)
2039
+ else:
2040
+ c.move("gradient", -4, 0)
2041
+ afterid = self.top.after(200, self._progress_alive)
2042
+ self._afterid["_progress_alive"] = afterid
2043
+
2044
+ # /////////////////////////////////////////////////////////////////
2045
+ # Threaded downloader
2046
+ # /////////////////////////////////////////////////////////////////
2047
+
2048
+ def _download_threaded(self, *e):
2049
+ # If the user tries to start a new download while we're already
2050
+ # downloading something, then abort the current download instead.
2051
+ if self._downloading:
2052
+ self._abort_download()
2053
+ return
2054
+
2055
+ # Change the 'download' button to an 'abort' button.
2056
+ self._download_button["text"] = "Cancel"
2057
+
2058
+ marked = [
2059
+ self._table[row, "Identifier"]
2060
+ for row in range(len(self._table))
2061
+ if self._table[row, 0] != ""
2062
+ ]
2063
+ selection = self._table.selected_row()
2064
+ if not marked and selection is not None:
2065
+ marked = [self._table[selection, "Identifier"]]
2066
+
2067
+ # Create a new data server object for the download operation,
2068
+ # just in case the user modifies our data server during the
2069
+ # download (e.g., clicking 'refresh' or editing the index url).
2070
+ ds = Downloader(self._ds.url, self._ds.download_dir)
2071
+
2072
+ # Start downloading in a separate thread.
2073
+ assert self._download_msg_queue == []
2074
+ assert self._download_abort_queue == []
2075
+ self._DownloadThread(
2076
+ ds,
2077
+ marked,
2078
+ self._download_lock,
2079
+ self._download_msg_queue,
2080
+ self._download_abort_queue,
2081
+ ).start()
2082
+
2083
+ # Monitor the download message queue & display its progress.
2084
+ self._log_indent = 0
2085
+ self._downloading = True
2086
+ self._monitor_message_queue()
2087
+
2088
+ # Display an indication that we're still alive and well by
2089
+ # cycling the progress bar.
2090
+ self._progress_alive()
2091
+
2092
+ def _abort_download(self):
2093
+ if self._downloading:
2094
+ self._download_lock.acquire()
2095
+ self._download_abort_queue.append("abort")
2096
+ self._download_lock.release()
2097
+
2098
+ class _DownloadThread(threading.Thread):
2099
+ def __init__(self, data_server, items, lock, message_queue, abort):
2100
+ self.data_server = data_server
2101
+ self.items = items
2102
+ self.lock = lock
2103
+ self.message_queue = message_queue
2104
+ self.abort = abort
2105
+ threading.Thread.__init__(self)
2106
+
2107
+ def run(self):
2108
+ for msg in self.data_server.incr_download(self.items):
2109
+ self.lock.acquire()
2110
+ self.message_queue.append(msg)
2111
+ # Check if we've been told to kill ourselves:
2112
+ if self.abort:
2113
+ self.message_queue.append("aborted")
2114
+ self.lock.release()
2115
+ return
2116
+ self.lock.release()
2117
+ self.lock.acquire()
2118
+ self.message_queue.append("finished")
2119
+ self.lock.release()
2120
+
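
The `_DownloadThread` above uses a simple lock-protected producer/consumer handshake: the worker appends messages to a shared list, and the GUI thread periodically drains it. A minimal standalone sketch of the same pattern (all names here are illustrative, not part of this module):

    import threading

    lock = threading.Lock()
    messages, abort = [], []  # shared state, guarded by `lock`

    def worker(n):
        # Produce n progress messages, honoring an abort request.
        for i in range(n):
            with lock:
                if abort:
                    messages.append("aborted")
                    return
                messages.append("progress %d" % i)
        with lock:
            messages.append("finished")

    t = threading.Thread(target=worker, args=(3,))
    t.start()
    t.join()
    with lock:
        print(messages)  # ['progress 0', 'progress 1', 'progress 2', 'finished']
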
2121
+ _MONITOR_QUEUE_DELAY = 100
2122
+
2123
+ def _monitor_message_queue(self):
2124
+ def show(s):
2125
+ self._progresslabel["text"] = s
2126
+ self._log(s)
2127
+
2128
+ # Try to acquire the lock; if it's busy, then just try again later.
2129
+ if not self._download_lock.acquire(blocking=False):
2130
+ return
2131
+ for msg in self._download_msg_queue:
2132
+
2133
+ # Done downloading?
2134
+ if msg == "finished" or msg == "aborted":
2135
+ # self._fill_table(sort=False)
2136
+ self._update_table_status()
2137
+ self._downloading = False
2138
+ self._download_button["text"] = "Download"
2139
+ del self._download_msg_queue[:]
2140
+ del self._download_abort_queue[:]
2141
+ self._download_lock.release()
2142
+ if msg == "aborted":
2143
+ show("Download aborted!")
2144
+ self._show_progress(None)
2145
+ else:
2146
+ afterid = self.top.after(100, self._show_progress, None)
2147
+ self._afterid["_monitor_message_queue"] = afterid
2148
+ return
2149
+
2150
+ # All other messages
2151
+ elif isinstance(msg, ProgressMessage):
2152
+ self._show_progress(msg.progress)
2153
+ elif isinstance(msg, ErrorMessage):
2154
+ show(msg.message)
2155
+ if msg.package is not None:
2156
+ self._select(msg.package.id)
2157
+ self._show_progress(None)
2158
+ self._downloading = False
2159
+ return # halt progress.
2160
+ elif isinstance(msg, StartCollectionMessage):
2161
+ show("Downloading collection %r" % msg.collection.id)
2162
+ self._log_indent += 1
2163
+ elif isinstance(msg, StartPackageMessage):
2164
+ self._ds.clear_status_cache(msg.package.id)
2165
+ show("Downloading package %r" % msg.package.id)
2166
+ elif isinstance(msg, UpToDateMessage):
2167
+ show("Package %s is up-to-date!" % msg.package.id)
2168
+ # elif isinstance(msg, StaleMessage):
2169
+ # show('Package %s is out-of-date or corrupt; updating it' %
2170
+ # msg.package.id)
2171
+ elif isinstance(msg, FinishDownloadMessage):
2172
+ show("Finished downloading %r." % msg.package.id)
2173
+ elif isinstance(msg, StartUnzipMessage):
2174
+ show("Unzipping %s" % msg.package.filename)
2175
+ elif isinstance(msg, FinishUnzipMessage):
2176
+ show("Finished installing %s" % msg.package.id)
2177
+ elif isinstance(msg, FinishCollectionMessage):
2178
+ self._log_indent -= 1
2179
+ show("Finished downloading collection %r." % msg.collection.id)
2180
+ self._clear_mark(msg.collection.id)
2181
+ elif isinstance(msg, FinishPackageMessage):
2182
+ self._update_table_status()
2183
+ self._clear_mark(msg.package.id)
2184
+
2185
+ # Let the user know when we're aborting a download (but
2186
+ # waiting for a good point to abort it, so we don't end up
2187
+ # with a partially unzipped package or anything like that).
2188
+ if self._download_abort_queue:
2189
+ self._progresslabel["text"] = "Aborting download..."
2190
+
2191
+ # Clear the message queue and then release the lock
2192
+ del self._download_msg_queue[:]
2193
+ self._download_lock.release()
2194
+
2195
+ # Check the queue again after MONITOR_QUEUE_DELAY msec.
2196
+ afterid = self.top.after(self._MONITOR_QUEUE_DELAY, self._monitor_message_queue)
2197
+ self._afterid["_monitor_message_queue"] = afterid
2198
+
2199
+
2200
+ ######################################################################
2201
+ # Helper Functions
2202
+ ######################################################################
2203
+ # [xx] It may make sense to move these to nltk.internals.
2204
+
2205
+
2206
+ def md5_hexdigest(file):
2207
+ """
2208
+ Calculate and return the MD5 checksum for a given file.
2209
+ ``file`` may either be a filename or an open stream.
2210
+ """
2211
+ if isinstance(file, str):
2212
+ with open(file, "rb") as infile:
2213
+ return _md5_hexdigest(infile)
2214
+ return _md5_hexdigest(file)
2215
+
2216
+
2217
+ def _md5_hexdigest(fp):
2218
+ md5_digest = md5()
2219
+ while True:
2220
+ block = fp.read(1024 * 16) # 16k blocks
2221
+ if not block:
2222
+ break
2223
+ md5_digest.update(block)
2224
+ return md5_digest.hexdigest()
2225
+
2226
+
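
As a quick illustration, a minimal sketch of this helper (the zip path below is hypothetical):

    # Hypothetical local file; any readable path or open binary stream works.
    checksum = md5_hexdigest("mypackage.zip")
    assert len(checksum) == 32  # MD5 hex digests are 32 characters long
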
2227
+ # change this to periodically yield progress messages?
2228
+ # [xx] get rid of topdir parameter -- we should be checking
2229
+ # this when we build the index, anyway.
2230
+ def unzip(filename, root, verbose=True):
2231
+ """
2232
+ Extract the contents of the zip file ``filename`` into the
2233
+ directory ``root``.
2234
+ """
2235
+ for message in _unzip_iter(filename, root, verbose):
2236
+ if isinstance(message, ErrorMessage):
2237
+ raise Exception(message)
2238
+
2239
+
2240
+ def _unzip_iter(filename, root, verbose=True):
2241
+ if verbose:
2242
+ sys.stdout.write("Unzipping %s" % os.path.split(filename)[1])
2243
+ sys.stdout.flush()
2244
+
2245
+ try:
2246
+ zf = zipfile.ZipFile(filename)
2247
+ except zipfile.error as e:
2248
+ yield ErrorMessage(filename, "Error with downloaded zip file")
2249
+ return
2250
+ except Exception as e:
2251
+ yield ErrorMessage(filename, e)
2252
+ return
2253
+
2254
+ zf.extractall(root)
2255
+
2256
+ if verbose:
2257
+ print()
2258
+
2259
+
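
A minimal usage sketch, assuming both the zip file and the target directory exist (the paths are hypothetical):

    # Extracts mypackage.zip into /tmp/nltk_data/corpora, silently.
    unzip("mypackage.zip", "/tmp/nltk_data/corpora", verbose=False)
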
2260
+ ######################################################################
2261
+ # Index Builder
2262
+ ######################################################################
2263
+ # This may move to a different file sometime.
2264
+
2265
+
2266
+ def build_index(root, base_url):
2267
+ """
2268
+ Create a new data.xml index file, by combining the xml description
2269
+ files for various packages and collections. ``root`` should be the
2270
+ path to a directory containing the package xml and zip files; and
2271
+ the collection xml files. The ``root`` directory is expected to
2272
+ have the following subdirectories::
2273
+
2274
+ root/
2275
+ packages/ .................. subdirectory for packages
2276
+ corpora/ ................. zip & xml files for corpora
2277
+ grammars/ ................ zip & xml files for grammars
2278
+ taggers/ ................. zip & xml files for taggers
2279
+ tokenizers/ .............. zip & xml files for tokenizers
2280
+ etc.
2281
+ collections/ ............... xml files for collections
2282
+
2283
+ For each package, there should be two files: ``package.zip``
2284
+ (where *package* is the package name)
2285
+ which contains the package itself as a compressed zip file; and
2286
+ ``package.xml``, which is an xml description of the package. The
2287
+ zipfile ``package.zip`` should expand to a single subdirectory
2288
+ named ``package/``. The base filename ``package`` must match
2289
+ the identifier given in the package's xml file.
2290
+
2291
+ For each collection, there should be a single file ``collection.xml``
2292
+ describing the collection, where *collection* is the name of the collection.
2293
+
2294
+ All identifiers (for both packages and collections) must be unique.
2295
+ """
2296
+ # Find all packages.
2297
+ packages = []
2298
+ for pkg_xml, zf, subdir in _find_packages(os.path.join(root, "packages")):
2299
+ zipstat = os.stat(zf.filename)
2300
+ url = f"{base_url}/{subdir}/{os.path.split(zf.filename)[1]}"
2301
+ unzipped_size = sum(zf_info.file_size for zf_info in zf.infolist())
2302
+
2303
+ # Fill in several fields of the package xml with calculated values.
2304
+ pkg_xml.set("unzipped_size", "%s" % unzipped_size)
2305
+ pkg_xml.set("size", "%s" % zipstat.st_size)
2306
+ pkg_xml.set("checksum", "%s" % md5_hexdigest(zf.filename))
2307
+ pkg_xml.set("subdir", subdir)
2308
+ # pkg_xml.set('svn_revision', _svn_revision(zf.filename))
2309
+ if not pkg_xml.get("url"):
2310
+ pkg_xml.set("url", url)
2311
+
2312
+ # Record the package.
2313
+ packages.append(pkg_xml)
2314
+
2315
+ # Find all collections
2316
+ collections = list(_find_collections(os.path.join(root, "collections")))
2317
+
2318
+ # Check that all UIDs are unique
2319
+ uids = set()
2320
+ for item in packages + collections:
2321
+ if item.get("id") in uids:
2322
+ raise ValueError("Duplicate UID: %s" % item.get("id"))
2323
+ uids.add(item.get("id"))
2324
+
2325
+ # Put it all together
2326
+ top_elt = ElementTree.Element("nltk_data")
2327
+ top_elt.append(ElementTree.Element("packages"))
2328
+ top_elt[0].extend(sorted(packages, key=lambda package: package.get("id")))
2329
+ top_elt.append(ElementTree.Element("collections"))
2330
+ top_elt[1].extend(sorted(collections, key=lambda collection: collection.get("id")))
2331
+
2332
+ _indent_xml(top_elt)
2333
+ return top_elt
2334
+
2335
+
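
A minimal sketch of driving this function for a local mirror, assuming a directory with the layout described in the docstring (the source path and base URL are hypothetical):

    from xml.etree import ElementTree

    index = build_index("/srv/nltk_data_src", "https://example.org/nltk_data")
    ElementTree.ElementTree(index).write("index.xml", encoding="utf-8")
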
2336
+ def _indent_xml(xml, prefix=""):
2337
+ """
2338
+ Helper for ``build_index()``: Given an XML ``ElementTree``, modify its
+ (and its descendants') ``text`` and ``tail`` attributes to generate
2340
+ an indented tree, where each nested element is indented by 2
2341
+ spaces with respect to its parent.
2342
+ """
2343
+ if len(xml) > 0:
2344
+ xml.text = (xml.text or "").strip() + "\n" + prefix + " "
2345
+ for child in xml:
2346
+ _indent_xml(child, prefix + " ")
2347
+ for child in xml[:-1]:
2348
+ child.tail = (child.tail or "").strip() + "\n" + prefix + " "
2349
+ xml[-1].tail = (xml[-1].tail or "").strip() + "\n" + prefix
2350
+
2351
+
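
To see the effect of this helper on a tiny tree:

    from xml.etree import ElementTree

    root = ElementTree.Element("nltk_data")
    root.append(ElementTree.Element("packages"))
    _indent_xml(root)
    print(ElementTree.tostring(root).decode())
    # <nltk_data>
    #   <packages />
    # </nltk_data>
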
2352
+ def _check_package(pkg_xml, zipfilename, zf):
2353
+ """
2354
+ Helper for ``build_index()``: Perform some checks to make sure that
2355
+ the given package is consistent.
2356
+ """
2357
+ # The filename must match the id given in the XML file.
2358
+ uid = os.path.splitext(os.path.split(zipfilename)[1])[0]
2359
+ if pkg_xml.get("id") != uid:
2360
+ raise ValueError(
2361
+ "package identifier mismatch ({} vs {})".format(pkg_xml.get("id"), uid)
2362
+ )
2363
+
2364
+ # Zip file must expand to a subdir whose name matches uid.
2365
+ if sum((name != uid and not name.startswith(uid + "/")) for name in zf.namelist()):
2366
+ raise ValueError(
2367
+ "Zipfile %s.zip does not expand to a single "
2368
+ "subdirectory %s/" % (uid, uid)
2369
+ )
2370
+
2371
+
2372
+ # update for git?
2373
+ def _svn_revision(filename):
2374
+ """
2375
+ Helper for ``build_index()``: Calculate the subversion revision
2376
+ number for a given file (by using ``subprocess`` to run ``svn``).
2377
+ """
2378
+ p = subprocess.Popen(
2379
+ ["svn", "status", "-v", filename],
2380
+ stdout=subprocess.PIPE,
2381
+ stderr=subprocess.PIPE,
+ text=True,  # decode stdout/stderr to str for the string handling below
2382
+ )
2383
+ (stdout, stderr) = p.communicate()
2384
+ if p.returncode != 0 or stderr or not stdout:
2385
+ raise ValueError(
2386
+ "Error determining svn_revision for %s: %s"
2387
+ % (os.path.split(filename)[1], textwrap.fill(stderr))
2388
+ )
2389
+ return stdout.split()[2]
2390
+
2391
+
2392
+ def _find_collections(root):
2393
+ """
2394
+ Helper for ``build_index()``: Yield a list of ElementTree.Element
2395
+ objects, each holding the xml for a single package collection.
2396
+ """
2397
+ for dirname, _subdirs, files in os.walk(root):
2398
+ for filename in files:
2399
+ if filename.endswith(".xml"):
2400
+ xmlfile = os.path.join(dirname, filename)
2401
+ yield ElementTree.parse(xmlfile).getroot()
2402
+
2403
+
2404
+ def _find_packages(root):
2405
+ """
2406
+ Helper for ``build_index()``: Yield a list of tuples
2407
+ ``(pkg_xml, zf, subdir)``, where:
2408
+ - ``pkg_xml`` is an ``ElementTree.Element`` holding the xml for a
2409
+ package
2410
+ - ``zf`` is a ``zipfile.ZipFile`` for the package's contents.
2411
+ - ``subdir`` is the subdirectory (relative to ``root``) where
2412
+ the package was found (e.g. 'corpora' or 'grammars').
2413
+ """
2414
+ from nltk.corpus.reader.util import _path_from
2415
+
2416
+ # Find all packages.
2417
+ packages = []
2418
+ for dirname, subdirs, files in os.walk(root):
2419
+ relpath = "/".join(_path_from(root, dirname))
2420
+ for filename in files:
2421
+ if filename.endswith(".xml"):
2422
+ xmlfilename = os.path.join(dirname, filename)
2423
+ zipfilename = xmlfilename[:-4] + ".zip"
2424
+ try:
2425
+ zf = zipfile.ZipFile(zipfilename)
2426
+ except Exception as e:
2427
+ raise ValueError(f"Error reading file {zipfilename!r}!\n{e}") from e
2428
+ try:
2429
+ pkg_xml = ElementTree.parse(xmlfilename).getroot()
2430
+ except Exception as e:
2431
+ raise ValueError(f"Error reading file {xmlfilename!r}!\n{e}") from e
2432
+
2433
+ # Check that the UID matches the filename
2434
+ uid = os.path.split(xmlfilename[:-4])[1]
2435
+ if pkg_xml.get("id") != uid:
2436
+ raise ValueError(
2437
+ "package identifier mismatch (%s "
2438
+ "vs %s)" % (pkg_xml.get("id"), uid)
2439
+ )
2440
+
2441
+ # Check that the zipfile expands to a subdir whose
2442
+ # name matches the uid.
2443
+ if sum(
2444
+ (name != uid and not name.startswith(uid + "/"))
2445
+ for name in zf.namelist()
2446
+ ):
2447
+ raise ValueError(
2448
+ "Zipfile %s.zip does not expand to a "
2449
+ "single subdirectory %s/" % (uid, uid)
2450
+ )
2451
+
2452
+ yield pkg_xml, zf, relpath
2453
+
2454
+ elif filename.endswith(".zip"):
2455
+ # Warn user in case a .xml does not exist for a .zip
2456
+ resourcename = os.path.splitext(filename)[0]
2457
+ xmlfilename = os.path.join(dirname, resourcename + ".xml")
2458
+ if not os.path.exists(xmlfilename):
2459
+ warnings.warn(
2460
+ f"{filename} exists, but {resourcename + '.xml'} cannot be found! "
2461
+ f"This could mean that {resourcename} cannot be downloaded.",
2462
+ stacklevel=2,
2463
+ )
2464
+
2465
+ # Don't recurse into svn subdirectories:
2466
+ try:
2467
+ subdirs.remove(".svn")
2468
+ except ValueError:
2469
+ pass
2470
+
2471
+
2472
+ ######################################################################
2473
+ # Main:
2474
+ ######################################################################
2475
+
2476
+ # There should be a command-line interface
2477
+
2478
+ # Aliases
2479
+ _downloader = Downloader()
2480
+ download = _downloader.download
2481
+
2482
+
2483
+ def download_shell():
2484
+ DownloaderShell(_downloader).run()
2485
+
2486
+
2487
+ def download_gui():
2488
+ DownloaderGUI(_downloader).mainloop()
2489
+
2490
+
2491
+ def update():
2492
+ _downloader.update()
2493
+
2494
+
2495
+ if __name__ == "__main__":
2496
+ from optparse import OptionParser
2497
+
2498
+ parser = OptionParser()
2499
+ parser.add_option(
2500
+ "-d",
2501
+ "--dir",
2502
+ dest="dir",
2503
+ help="download package to directory DIR",
2504
+ metavar="DIR",
2505
+ )
2506
+ parser.add_option(
2507
+ "-q",
2508
+ "--quiet",
2509
+ dest="quiet",
2510
+ action="store_true",
2511
+ default=False,
2512
+ help="work quietly",
2513
+ )
2514
+ parser.add_option(
2515
+ "-f",
2516
+ "--force",
2517
+ dest="force",
2518
+ action="store_true",
2519
+ default=False,
2520
+ help="download even if already installed",
2521
+ )
2522
+ parser.add_option(
2523
+ "-e",
2524
+ "--exit-on-error",
2525
+ dest="halt_on_error",
2526
+ action="store_true",
2527
+ default=False,
2528
+ help="exit if an error occurs",
2529
+ )
2530
+ parser.add_option(
2531
+ "-u",
2532
+ "--url",
2533
+ dest="server_index_url",
2534
+ default=os.environ.get("NLTK_DOWNLOAD_URL"),
2535
+ help="download server index url",
2536
+ )
2537
+
2538
+ (options, args) = parser.parse_args()
2539
+
2540
+ downloader = Downloader(server_index_url=options.server_index_url)
2541
+
2542
+ if args:
2543
+ for pkg_id in args:
2544
+ rv = downloader.download(
2545
+ info_or_id=pkg_id,
2546
+ download_dir=options.dir,
2547
+ quiet=options.quiet,
2548
+ force=options.force,
2549
+ halt_on_error=options.halt_on_error,
2550
+ )
2551
+ if rv is False and options.halt_on_error:
2552
+ break
2553
+ else:
2554
+ downloader.download(
2555
+ download_dir=options.dir,
2556
+ quiet=options.quiet,
2557
+ force=options.force,
2558
+ halt_on_error=options.halt_on_error,
2559
+ )
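
The same functionality is reachable programmatically through the module-level aliases defined above; for example (the package id and directory are illustrative):

    import nltk

    # Roughly equivalent to: python -m nltk.downloader -d /tmp/nltk_data punkt
    nltk.download("punkt", download_dir="/tmp/nltk_data", quiet=True)
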
env-llmeval/lib/python3.10/site-packages/nltk/featstruct.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/nltk/grammar.py ADDED
@@ -0,0 +1,1708 @@
1
+ # Natural Language Toolkit: Context Free Grammars
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # Jason Narad <[email protected]>
7
+ # Peter Ljunglöf <[email protected]>
8
+ # Tom Aarsen <>
9
+ # URL: <https://www.nltk.org/>
10
+ # For license information, see LICENSE.TXT
11
+ #
12
+
13
+ """
14
+ Basic data classes for representing context free grammars. A
15
+ "grammar" specifies which trees can represent the structure of a
16
+ given text. Each of these trees is called a "parse tree" for the
17
+ text (or simply a "parse"). In a "context free" grammar, the set of
18
+ parse trees for any piece of a text can depend only on that piece, and
19
+ not on the rest of the text (i.e., the piece's context). Context free
20
+ grammars are often used to find possible syntactic structures for
21
+ sentences. In this context, the leaves of a parse tree are word
22
+ tokens; and the node values are phrasal categories, such as ``NP``
23
+ and ``VP``.
24
+
25
+ The ``CFG`` class is used to encode context free grammars. Each
26
+ ``CFG`` consists of a start symbol and a set of productions.
27
+ The "start symbol" specifies the root node value for parse trees. For example,
28
+ the start symbol for syntactic parsing is usually ``S``. Start
29
+ symbols are encoded using the ``Nonterminal`` class, which is discussed
30
+ below.
31
+
32
+ A Grammar's "productions" specify what parent-child relationships a parse
33
+ tree can contain. Each production specifies that a particular
34
+ node can be the parent of a particular set of children. For example,
35
+ the production ``<S> -> <NP> <VP>`` specifies that an ``S`` node can
36
+ be the parent of an ``NP`` node and a ``VP`` node.
37
+
38
+ Grammar productions are implemented by the ``Production`` class.
39
+ Each ``Production`` consists of a left hand side and a right hand
40
+ side. The "left hand side" is a ``Nonterminal`` that specifies the
41
+ node type for a potential parent; and the "right hand side" is a list
42
+ that specifies allowable children for that parent. This list
43
+ consists of ``Nonterminals`` and text types: each ``Nonterminal``
44
+ indicates that the corresponding child may be a ``TreeToken`` with the
45
+ specified node type; and each text type indicates that the
46
+ corresponding child may be a ``Token`` with that type.
47
+
48
+ The ``Nonterminal`` class is used to distinguish node values from leaf
49
+ values. This prevents the grammar from accidentally using a leaf
50
+ value (such as the English word "A") as the node of a subtree. Within
51
+ a ``CFG``, all node values are wrapped in the ``Nonterminal``
52
+ class. Note, however, that the trees that are specified by the grammar do
53
+ *not* include these ``Nonterminal`` wrappers.
54
+
55
+ Grammars can also be given a more procedural interpretation. According to
56
+ this interpretation, a Grammar specifies any tree structure *tree* that
57
+ can be produced by the following procedure:
58
+
59
+ | Set tree to the start symbol
60
+ | Repeat until tree contains no more nonterminal leaves:
61
+ | Choose a production prod whose left hand side
62
+ | lhs is a nonterminal leaf of tree.
63
+ | Replace the nonterminal leaf with a subtree, whose node
64
+ | value is the value wrapped by the nonterminal lhs, and
65
+ | whose children are the right hand side of prod.
66
+
67
+ The operation of replacing the left hand side (*lhs*) of a production
68
+ with the right hand side (*rhs*) in a tree (*tree*) is known as
69
+ "expanding" *lhs* to *rhs* in *tree*.
70
+ """
71
+ import re
72
+ from functools import total_ordering
73
+
74
+ from nltk.featstruct import SLASH, TYPE, FeatDict, FeatStruct, FeatStructReader
75
+ from nltk.internals import raise_unorderable_types
76
+ from nltk.probability import ImmutableProbabilisticMixIn
77
+ from nltk.util import invert_graph, transitive_closure
78
+
79
+ #################################################################
80
+ # Nonterminal
81
+ #################################################################
82
+
83
+
84
+ @total_ordering
85
+ class Nonterminal:
86
+ """
87
+ A non-terminal symbol for a context free grammar. ``Nonterminal``
88
+ is a wrapper class for node values; it is used by ``Production``
89
+ objects to distinguish node values from leaf values.
90
+ The node value that is wrapped by a ``Nonterminal`` is known as its
91
+ "symbol". Symbols are typically strings representing phrasal
92
+ categories (such as ``"NP"`` or ``"VP"``). However, more complex
93
+ symbol types are sometimes used (e.g., for lexicalized grammars).
94
+ Since symbols are node values, they must be immutable and
95
+ hashable. Two ``Nonterminals`` are considered equal if their
96
+ symbols are equal.
97
+
98
+ :see: ``CFG``, ``Production``
99
+ :type _symbol: any
100
+ :ivar _symbol: The node value corresponding to this
101
+ ``Nonterminal``. This value must be immutable and hashable.
102
+ """
103
+
104
+ def __init__(self, symbol):
105
+ """
106
+ Construct a new non-terminal from the given symbol.
107
+
108
+ :type symbol: any
109
+ :param symbol: The node value corresponding to this
110
+ ``Nonterminal``. This value must be immutable and
111
+ hashable.
112
+ """
113
+ self._symbol = symbol
114
+
115
+ def symbol(self):
116
+ """
117
+ Return the node value corresponding to this ``Nonterminal``.
118
+
119
+ :rtype: (any)
120
+ """
121
+ return self._symbol
122
+
123
+ def __eq__(self, other):
124
+ """
125
+ Return True if this non-terminal is equal to ``other``. In
126
+ particular, return True if ``other`` is a ``Nonterminal``
127
+ and this non-terminal's symbol is equal to ``other`` 's symbol.
128
+
129
+ :rtype: bool
130
+ """
131
+ return type(self) == type(other) and self._symbol == other._symbol
132
+
133
+ def __ne__(self, other):
134
+ return not self == other
135
+
136
+ def __lt__(self, other):
137
+ if not isinstance(other, Nonterminal):
138
+ raise_unorderable_types("<", self, other)
139
+ return self._symbol < other._symbol
140
+
141
+ def __hash__(self):
142
+ return hash(self._symbol)
143
+
144
+ def __repr__(self):
145
+ """
146
+ Return a string representation for this ``Nonterminal``.
147
+
148
+ :rtype: str
149
+ """
150
+ if isinstance(self._symbol, str):
151
+ return "%s" % self._symbol
152
+ else:
153
+ return "%s" % repr(self._symbol)
154
+
155
+ def __str__(self):
156
+ """
157
+ Return a string representation for this ``Nonterminal``.
158
+
159
+ :rtype: str
160
+ """
161
+ if isinstance(self._symbol, str):
162
+ return "%s" % self._symbol
163
+ else:
164
+ return "%s" % repr(self._symbol)
165
+
166
+ def __div__(self, rhs):
167
+ """
168
+ Return a new nonterminal whose symbol is ``A/B``, where ``A`` is
169
+ the symbol for this nonterminal, and ``B`` is the symbol for rhs.
170
+
171
+ :param rhs: The nonterminal used to form the right hand side
172
+ of the new nonterminal.
173
+ :type rhs: Nonterminal
174
+ :rtype: Nonterminal
175
+ """
176
+ return Nonterminal(f"{self._symbol}/{rhs._symbol}")
177
+
178
+ def __truediv__(self, rhs):
179
+ """
180
+ Return a new nonterminal whose symbol is ``A/B``, where ``A`` is
181
+ the symbol for this nonterminal, and ``B`` is the symbol for rhs.
182
+ This function allows use of the slash ``/`` operator with
183
+ the future import of division.
184
+
185
+ :param rhs: The nonterminal used to form the right hand side
186
+ of the new nonterminal.
187
+ :type rhs: Nonterminal
188
+ :rtype: Nonterminal
189
+ """
190
+ return self.__div__(rhs)
191
+
192
+
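
A small sketch of the slash operator defined above (useful, e.g., for gap categories):

    from nltk.grammar import Nonterminal

    S, NP = Nonterminal("S"), Nonterminal("NP")
    print(S / NP)                         # S/NP
    print(S / NP == Nonterminal("S/NP"))  # True
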
193
+ def nonterminals(symbols):
194
+ """
195
+ Given a string containing a list of symbol names, return a list of
196
+ ``Nonterminals`` constructed from those symbols.
197
+
198
+ :param symbols: The symbol name string. This string can be
199
+ delimited by either spaces or commas.
200
+ :type symbols: str
201
+ :return: A list of ``Nonterminals`` constructed from the symbol
202
+ names given in ``symbols``. The ``Nonterminals`` are sorted
203
+ in the same order as the symbol names.
204
+ :rtype: list(Nonterminal)
205
+ """
206
+ if "," in symbols:
207
+ symbol_list = symbols.split(",")
208
+ else:
209
+ symbol_list = symbols.split()
210
+ return [Nonterminal(s.strip()) for s in symbol_list]
211
+
212
+
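
For example, a comma- or space-delimited string yields one ``Nonterminal`` per name:

    from nltk.grammar import nonterminals

    S, NP, VP, PP = nonterminals("S, NP, VP, PP")
    print(NP)  # NP
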
213
+ class FeatStructNonterminal(FeatDict, Nonterminal):
214
+ """A feature structure that's also a nonterminal. It acts as its
215
+ own symbol, and automatically freezes itself when hashed."""
216
+
217
+ def __hash__(self):
218
+ self.freeze()
219
+ return FeatStruct.__hash__(self)
220
+
221
+ def symbol(self):
222
+ return self
223
+
224
+
225
+ def is_nonterminal(item):
226
+ """
227
+ :return: True if the item is a ``Nonterminal``.
228
+ :rtype: bool
229
+ """
230
+ return isinstance(item, Nonterminal)
231
+
232
+
233
+ #################################################################
234
+ # Terminals
235
+ #################################################################
236
+
237
+
238
+ def is_terminal(item):
239
+ """
240
+ Return True if the item is a terminal, which currently is
241
+ if it is hashable and not a ``Nonterminal``.
242
+
243
+ :rtype: bool
244
+ """
245
+ return hasattr(item, "__hash__") and not isinstance(item, Nonterminal)
246
+
247
+
248
+ #################################################################
249
+ # Productions
250
+ #################################################################
251
+
252
+
253
+ @total_ordering
254
+ class Production:
255
+ """
256
+ A grammar production. Each production maps a single symbol
257
+ on the "left-hand side" to a sequence of symbols on the
258
+ "right-hand side". (In the case of context-free productions,
259
+ the left-hand side must be a ``Nonterminal``, and the right-hand
260
+ side is a sequence of terminals and ``Nonterminals``.)
261
+ "terminals" can be any immutable hashable object that is
262
+ not a ``Nonterminal``. Typically, terminals are strings
263
+ representing words, such as ``"dog"`` or ``"under"``.
264
+
265
+ :see: ``CFG``
266
+ :see: ``DependencyGrammar``
267
+ :see: ``Nonterminal``
268
+ :type _lhs: Nonterminal
269
+ :ivar _lhs: The left-hand side of the production.
270
+ :type _rhs: tuple(Nonterminal, terminal)
271
+ :ivar _rhs: The right-hand side of the production.
272
+ """
273
+
274
+ def __init__(self, lhs, rhs):
275
+ """
276
+ Construct a new ``Production``.
277
+
278
+ :param lhs: The left-hand side of the new ``Production``.
279
+ :type lhs: Nonterminal
280
+ :param rhs: The right-hand side of the new ``Production``.
281
+ :type rhs: sequence(Nonterminal and terminal)
282
+ """
283
+ if isinstance(rhs, str):
284
+ raise TypeError(
285
+ "production right hand side should be a list, " "not a string"
286
+ )
287
+ self._lhs = lhs
288
+ self._rhs = tuple(rhs)
289
+
290
+ def lhs(self):
291
+ """
292
+ Return the left-hand side of this ``Production``.
293
+
294
+ :rtype: Nonterminal
295
+ """
296
+ return self._lhs
297
+
298
+ def rhs(self):
299
+ """
300
+ Return the right-hand side of this ``Production``.
301
+
302
+ :rtype: sequence(Nonterminal and terminal)
303
+ """
304
+ return self._rhs
305
+
306
+ def __len__(self):
307
+ """
308
+ Return the length of the right-hand side.
309
+
310
+ :rtype: int
311
+ """
312
+ return len(self._rhs)
313
+
314
+ def is_nonlexical(self):
315
+ """
316
+ Return True if the right-hand side only contains ``Nonterminals``
317
+
318
+ :rtype: bool
319
+ """
320
+ return all(is_nonterminal(n) for n in self._rhs)
321
+
322
+ def is_lexical(self):
323
+ """
324
+ Return True if the right-hand side contains at least one terminal token.
325
+
326
+ :rtype: bool
327
+ """
328
+ return not self.is_nonlexical()
329
+
330
+ def __str__(self):
331
+ """
332
+ Return a verbose string representation of the ``Production``.
333
+
334
+ :rtype: str
335
+ """
336
+ result = "%s -> " % repr(self._lhs)
337
+ result += " ".join(repr(el) for el in self._rhs)
338
+ return result
339
+
340
+ def __repr__(self):
341
+ """
342
+ Return a concise string representation of the ``Production``.
343
+
344
+ :rtype: str
345
+ """
346
+ return "%s" % self
347
+
348
+ def __eq__(self, other):
349
+ """
350
+ Return True if this ``Production`` is equal to ``other``.
351
+
352
+ :rtype: bool
353
+ """
354
+ return (
355
+ type(self) == type(other)
356
+ and self._lhs == other._lhs
357
+ and self._rhs == other._rhs
358
+ )
359
+
360
+ def __ne__(self, other):
361
+ return not self == other
362
+
363
+ def __lt__(self, other):
364
+ if not isinstance(other, Production):
365
+ raise_unorderable_types("<", self, other)
366
+ return (self._lhs, self._rhs) < (other._lhs, other._rhs)
367
+
368
+ def __hash__(self):
369
+ """
370
+ Return a hash value for the ``Production``.
371
+
372
+ :rtype: int
373
+ """
374
+ return hash((self._lhs, self._rhs))
375
+
376
+
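
A small sketch of constructing and inspecting a production:

    from nltk.grammar import Production, nonterminals

    S, NP, VP = nonterminals("S NP VP")
    prod = Production(S, [NP, VP])
    print(prod)                  # S -> NP VP
    print(prod.lhs())            # S
    print(len(prod))             # 2
    print(prod.is_nonlexical())  # True: the right-hand side has no terminals
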
377
+ class DependencyProduction(Production):
378
+ """
379
+ A dependency grammar production. Each production maps a single
380
+ head word to an unordered list of one or more modifier words.
381
+ """
382
+
383
+ def __str__(self):
384
+ """
385
+ Return a verbose string representation of the ``DependencyProduction``.
386
+
387
+ :rtype: str
388
+ """
389
+ result = f"'{self._lhs}' ->"
390
+ for elt in self._rhs:
391
+ result += f" '{elt}'"
392
+ return result
393
+
394
+
395
+ class ProbabilisticProduction(Production, ImmutableProbabilisticMixIn):
396
+ """
397
+ A probabilistic context free grammar production.
398
+ A PCFG ``ProbabilisticProduction`` is essentially just a ``Production`` that
399
+ has an associated probability, which represents how likely it is that
400
+ this production will be used. In particular, the probability of a
401
+ ``ProbabilisticProduction`` records the likelihood that its right-hand side is
402
+ the correct instantiation for any given occurrence of its left-hand side.
403
+
404
+ :see: ``Production``
405
+ """
406
+
407
+ def __init__(self, lhs, rhs, **prob):
408
+ """
409
+ Construct a new ``ProbabilisticProduction``.
410
+
411
+ :param lhs: The left-hand side of the new ``ProbabilisticProduction``.
412
+ :type lhs: Nonterminal
413
+ :param rhs: The right-hand side of the new ``ProbabilisticProduction``.
414
+ :type rhs: sequence(Nonterminal and terminal)
415
+ :param prob: Probability parameters of the new ``ProbabilisticProduction``.
416
+ """
417
+ ImmutableProbabilisticMixIn.__init__(self, **prob)
418
+ Production.__init__(self, lhs, rhs)
419
+
420
+ def __str__(self):
421
+ return super().__str__() + (
422
+ " [1.0]" if (self.prob() == 1.0) else " [%g]" % self.prob()
423
+ )
424
+
425
+ def __eq__(self, other):
426
+ return (
427
+ type(self) == type(other)
428
+ and self._lhs == other._lhs
429
+ and self._rhs == other._rhs
430
+ and self.prob() == other.prob()
431
+ )
432
+
433
+ def __ne__(self, other):
434
+ return not self == other
435
+
436
+ def __hash__(self):
437
+ return hash((self._lhs, self._rhs, self.prob()))
438
+
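
For instance, attaching a probability to a rule:

    from nltk.grammar import ProbabilisticProduction, nonterminals

    S, NP, VP = nonterminals("S NP VP")
    pprod = ProbabilisticProduction(S, [NP, VP], prob=0.4)
    print(pprod)         # S -> NP VP [0.4]
    print(pprod.prob())  # 0.4
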
439
+
440
+ #################################################################
441
+ # Grammars
442
+ #################################################################
443
+
444
+
445
+ class CFG:
446
+ """
447
+ A context-free grammar. A grammar consists of a start state and
448
+ a set of productions. The set of terminals and nonterminals is
449
+ implicitly specified by the productions.
450
+
451
+ If you need efficient key-based access to productions, you
452
+ can use a subclass to implement it.
453
+ """
454
+
455
+ def __init__(self, start, productions, calculate_leftcorners=True):
456
+ """
457
+ Create a new context-free grammar, from the given start state
458
+ and set of ``Production`` instances.
459
+
460
+ :param start: The start symbol
461
+ :type start: Nonterminal
462
+ :param productions: The list of productions that defines the grammar
463
+ :type productions: list(Production)
464
+ :param calculate_leftcorners: False if we don't want to calculate the
465
+ leftcorner relation. In that case, some optimized chart parsers won't work.
466
+ :type calculate_leftcorners: bool
467
+ """
468
+ if not is_nonterminal(start):
469
+ raise TypeError(
470
+ "start should be a Nonterminal object,"
471
+ " not a %s" % type(start).__name__
472
+ )
473
+
474
+ self._start = start
475
+ self._productions = productions
476
+ self._categories = {prod.lhs() for prod in productions}
477
+ self._calculate_indexes()
478
+ self._calculate_grammar_forms()
479
+ if calculate_leftcorners:
480
+ self._calculate_leftcorners()
481
+
482
+ def _calculate_indexes(self):
483
+ self._lhs_index = {}
484
+ self._rhs_index = {}
485
+ self._empty_index = {}
486
+ self._lexical_index = {}
487
+ for prod in self._productions:
488
+ # Left hand side.
489
+ lhs = prod._lhs
490
+ if lhs not in self._lhs_index:
491
+ self._lhs_index[lhs] = []
492
+ self._lhs_index[lhs].append(prod)
493
+ if prod._rhs:
494
+ # First item in right hand side.
495
+ rhs0 = prod._rhs[0]
496
+ if rhs0 not in self._rhs_index:
497
+ self._rhs_index[rhs0] = []
498
+ self._rhs_index[rhs0].append(prod)
499
+ else:
500
+ # The right hand side is empty.
501
+ self._empty_index[prod.lhs()] = prod
502
+ # Lexical tokens in the right hand side.
503
+ for token in prod._rhs:
504
+ if is_terminal(token):
505
+ self._lexical_index.setdefault(token, set()).add(prod)
506
+
507
+ def _calculate_leftcorners(self):
508
+ # Calculate leftcorner relations, for use in optimized parsing.
509
+ self._immediate_leftcorner_categories = {cat: {cat} for cat in self._categories}
510
+ self._immediate_leftcorner_words = {cat: set() for cat in self._categories}
511
+ for prod in self.productions():
512
+ if len(prod) > 0:
513
+ cat, left = prod.lhs(), prod.rhs()[0]
514
+ if is_nonterminal(left):
515
+ self._immediate_leftcorner_categories[cat].add(left)
516
+ else:
517
+ self._immediate_leftcorner_words[cat].add(left)
518
+
519
+ lc = transitive_closure(self._immediate_leftcorner_categories, reflexive=True)
520
+ self._leftcorners = lc
521
+ self._leftcorner_parents = invert_graph(lc)
522
+
523
+ nr_leftcorner_categories = sum(
524
+ map(len, self._immediate_leftcorner_categories.values())
525
+ )
526
+ nr_leftcorner_words = sum(map(len, self._immediate_leftcorner_words.values()))
527
+ if nr_leftcorner_words > nr_leftcorner_categories > 10000:
528
+ # If the grammar is big, the leftcorner-word dictionary will be too large.
529
+ # In that case it is better to calculate the relation on demand.
530
+ self._leftcorner_words = None
531
+ return
532
+
533
+ self._leftcorner_words = {}
534
+ for cat in self._leftcorners:
535
+ lefts = self._leftcorners[cat]
536
+ lc = self._leftcorner_words[cat] = set()
537
+ for left in lefts:
538
+ lc.update(self._immediate_leftcorner_words.get(left, set()))
539
+
540
+ @classmethod
541
+ def fromstring(cls, input, encoding=None):
542
+ """
543
+ Return the grammar instance corresponding to the input string(s).
544
+
545
+ :param input: a grammar, either in the form of a string or as a list of strings.
546
+ """
547
+ start, productions = read_grammar(
548
+ input, standard_nonterm_parser, encoding=encoding
549
+ )
550
+ return cls(start, productions)
551
+
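
A minimal sketch of building a grammar from a string and inspecting it:

    from nltk.grammar import CFG

    grammar = CFG.fromstring("""
        S -> NP VP
        NP -> 'the' N
        N -> 'dog'
        VP -> 'barks'
    """)
    print(grammar.start())             # S
    print(len(grammar.productions()))  # 4
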
552
+ def start(self):
553
+ """
554
+ Return the start symbol of the grammar
555
+
556
+ :rtype: Nonterminal
557
+ """
558
+ return self._start
559
+
560
+ # tricky to balance readability and efficiency here!
561
+ # can't use set operations as they don't preserve ordering
562
+ def productions(self, lhs=None, rhs=None, empty=False):
563
+ """
564
+ Return the grammar productions, filtered by the left-hand side
565
+ or the first item in the right-hand side.
566
+
567
+ :param lhs: Only return productions with the given left-hand side.
568
+ :param rhs: Only return productions with the given first item
569
+ in the right-hand side.
570
+ :param empty: Only return productions with an empty right-hand side.
571
+ :return: A list of productions matching the given constraints.
572
+ :rtype: list(Production)
573
+ """
574
+ if rhs and empty:
575
+ raise ValueError(
576
+ "You cannot select empty and non-empty " "productions at the same time."
577
+ )
578
+
579
+ # no constraints so return everything
580
+ if not lhs and not rhs:
581
+ if not empty:
582
+ return self._productions
583
+ else:
584
+ return self._empty_index.values()
585
+
586
+ # only lhs specified so look up its index
587
+ elif lhs and not rhs:
588
+ if not empty:
589
+ return self._lhs_index.get(lhs, [])
590
+ elif lhs in self._empty_index:
591
+ return [self._empty_index[lhs]]
592
+ else:
593
+ return []
594
+
595
+ # only rhs specified so look up its index
596
+ elif rhs and not lhs:
597
+ return self._rhs_index.get(rhs, [])
598
+
599
+ # intersect
600
+ else:
601
+ return [
602
+ prod
603
+ for prod in self._lhs_index.get(lhs, [])
604
+ if prod in self._rhs_index.get(rhs, [])
605
+ ]
606
+
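A minimal sketch of how the three filters compose (the toy grammar is invented for illustration):

    from nltk import CFG, Nonterminal

    g = CFG.fromstring("""
        S -> NP VP
        NP -> Det N | 'John'
        Det -> 'the'
        N -> 'dog'
        VP -> 'barks'
    """)
    print(g.productions(lhs=Nonterminal("NP")))   # [NP -> Det N, NP -> 'John']
    print(g.productions(rhs=Nonterminal("Det")))  # [NP -> Det N]
    print(g.productions(rhs="John"))              # [NP -> 'John']; terminals are plain strings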
607
+ def leftcorners(self, cat):
608
+ """
609
+ Return the set of all nonterminals that the given nonterminal
610
+ can start with, including itself.
611
+
612
+ This is the reflexive, transitive closure of the immediate
613
+ leftcorner relation: (A > B) iff (A -> B beta)
614
+
615
+ :param cat: the parent of the leftcorners
616
+ :type cat: Nonterminal
617
+ :return: the set of all leftcorners
618
+ :rtype: set(Nonterminal)
619
+ """
620
+ return self._leftcorners.get(cat, {cat})
621
+
622
+ def is_leftcorner(self, cat, left):
623
+ """
624
+ True if left is a leftcorner of cat, where left can be a
625
+ terminal or a nonterminal.
626
+
627
+ :param cat: the parent of the leftcorner
628
+ :type cat: Nonterminal
629
+ :param left: the suggested leftcorner
630
+ :type left: Terminal or Nonterminal
631
+ :rtype: bool
632
+ """
633
+ if is_nonterminal(left):
634
+ return left in self.leftcorners(cat)
635
+ elif self._leftcorner_words:
636
+ return left in self._leftcorner_words.get(cat, set())
637
+ else:
638
+ return any(
639
+ left in self._immediate_leftcorner_words.get(parent, set())
640
+ for parent in self.leftcorners(cat)
641
+ )
642
+
643
+ def leftcorner_parents(self, cat):
644
+ """
645
+ Return the set of all nonterminals for which the given category
646
+ is a left corner. This is the inverse of the leftcorner relation.
647
+
648
+ :param cat: the suggested leftcorner
649
+ :type cat: Nonterminal
650
+ :return: the set of all parents to the leftcorner
651
+ :rtype: set(Nonterminal)
652
+ """
653
+ return self._leftcorner_parents.get(cat, {cat})
654
+
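A short illustration of the leftcorner relations, reusing the toy grammar ``g`` from the sketch above:

    NP, Det, S = Nonterminal("NP"), Nonterminal("Det"), Nonterminal("S")
    print(g.leftcorners(S))            # {S, NP, Det}: the reflexive, transitive closure
    print(g.is_leftcorner(S, "the"))   # True, since 'the' can begin an S
    print(g.leftcorner_parents(Det))   # {Det, NP, S}: everything Det can start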
655
+ def check_coverage(self, tokens):
656
+ """
657
+ Check whether the grammar rules cover the given list of tokens.
658
+ If not, then raise an exception.
659
+
660
+ :type tokens: list(str)
661
+ """
662
+ missing = [tok for tok in tokens if not self._lexical_index.get(tok)]
663
+ if missing:
664
+ missing = ", ".join(f"{w!r}" for w in missing)
665
+ raise ValueError(
666
+ "Grammar does not cover some of the " "input words: %r." % missing
667
+ )
668
+
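For example, with the toy grammar ``g`` from the sketch above:

    g.check_coverage(["the", "dog", "barks"])   # returns silently: every token is known
    try:
        g.check_coverage(["the", "cat", "barks"])
    except ValueError as err:
        print(err)   # complains that 'cat' is not covered by any lexical rule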
669
+ def _calculate_grammar_forms(self):
670
+ """
671
+ Pre-calculate which form(s) the grammar is in.
672
+ """
673
+ prods = self._productions
674
+ self._is_lexical = all(p.is_lexical() for p in prods)
675
+ self._is_nonlexical = all(p.is_nonlexical() for p in prods if len(p) != 1)
676
+ self._min_len = min(len(p) for p in prods)
677
+ self._max_len = max(len(p) for p in prods)
678
+ self._all_unary_are_lexical = all(p.is_lexical() for p in prods if len(p) == 1)
679
+
680
+ def is_lexical(self):
681
+ """
682
+ Return True if all productions are lexicalised.
683
+ """
684
+ return self._is_lexical
685
+
686
+ def is_nonlexical(self):
687
+ """
688
+ Return True if all lexical rules are "preterminals", that is,
689
+ unary rules which can be separated in a preprocessing step.
690
+
691
+ This means that all productions are of the forms
692
+ A -> B1 ... Bn (n>=0), or A -> "s".
693
+
694
+ Note: is_lexical() and is_nonlexical() are not opposites.
695
+ There are grammars which are neither, and grammars which are both.
696
+ """
697
+ return self._is_nonlexical
698
+
699
+ def min_len(self):
700
+ """
701
+ Return the right-hand side length of the shortest grammar production.
702
+ """
703
+ return self._min_len
704
+
705
+ def max_len(self):
706
+ """
707
+ Return the right-hand side length of the longest grammar production.
708
+ """
709
+ return self._max_len
710
+
711
+ def is_nonempty(self):
712
+ """
713
+ Return True if there are no empty productions.
714
+ """
715
+ return self._min_len > 0
716
+
717
+ def is_binarised(self):
718
+ """
719
+ Return True if all productions are at most binary.
720
+ Note that there can still be empty and unary productions.
721
+ """
722
+ return self._max_len <= 2
723
+
724
+ def is_flexible_chomsky_normal_form(self):
725
+ """
726
+ Return True if all productions are of the forms
727
+ A -> B C, A -> B, or A -> "s".
728
+ """
729
+ return self.is_nonempty() and self.is_nonlexical() and self.is_binarised()
730
+
731
+ def is_chomsky_normal_form(self):
732
+ """
733
+ Return True if the grammar is of Chomsky Normal Form, i.e. all productions
734
+ are of the form A -> B C, or A -> "s".
735
+ """
736
+ return self.is_flexible_chomsky_normal_form() and self._all_unary_are_lexical
737
+
738
+ def chomsky_normal_form(self, new_token_padding="@$@", flexible=False):
739
+ """
740
+ Returns a new Grammar that is in chomsky normal
741
+
742
+ :param: new_token_padding
743
+ Customise new rule formation during binarisation
744
+ """
745
+ if self.is_chomsky_normal_form():
746
+ return self
747
+ if self.productions(empty=True):
748
+ raise ValueError(
749
+ "Grammar has Empty rules. " "Cannot deal with them at the moment"
750
+ )
751
+
752
+ # check for mixed rules
753
+ for rule in self.productions():
754
+ if rule.is_lexical() and len(rule.rhs()) > 1:
755
+ raise ValueError(
756
+ f"Cannot handled mixed rule {rule.lhs()} => {rule.rhs()}"
757
+ )
758
+
759
+ step1 = CFG.eliminate_start(self)
760
+ step2 = CFG.binarize(step1, new_token_padding)
761
+ if flexible:
762
+ return step2
763
+ step3 = CFG.remove_unitary_rules(step2)
764
+ step4 = CFG(step3.start(), list(set(step3.productions())))
765
+ return step4
766
+
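A minimal end-to-end sketch of the conversion (grammar invented for illustration; the default padding is shown explicitly):

    from nltk import CFG

    g = CFG.fromstring("""
        S -> A B C
        A -> 'a'
        B -> 'b'
        C -> 'c'
    """)
    cnf = g.chomsky_normal_form(new_token_padding="@$@")
    print(cnf.is_chomsky_normal_form())   # True
    for p in cnf.productions():
        print(p)   # includes S -> A S@$@A and S@$@A -> B C, plus the lexical rules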
767
+ @classmethod
768
+ def remove_unitary_rules(cls, grammar):
769
+ """
770
+ Remove nonlexical unitary rules and convert them to
771
+ lexical
772
+ """
773
+ result = []
774
+ unitary = []
775
+ for rule in grammar.productions():
776
+ if len(rule) == 1 and rule.is_nonlexical():
777
+ unitary.append(rule)
778
+ else:
779
+ result.append(rule)
780
+
781
+ while unitary:
782
+ rule = unitary.pop(0)
783
+ for item in grammar.productions(lhs=rule.rhs()[0]):
784
+ new_rule = Production(rule.lhs(), item.rhs())
785
+ if len(new_rule) != 1 or new_rule.is_lexical():
786
+ result.append(new_rule)
787
+ else:
788
+ unitary.append(new_rule)
789
+
790
+ n_grammar = CFG(grammar.start(), result)
791
+ return n_grammar
792
+
793
+ @classmethod
794
+ def binarize(cls, grammar, padding="@$@"):
795
+ """
796
+ Convert all non-binary rules into binary by introducing
797
+ new tokens.
798
+ Example::
799
+
800
+ Original:
801
+ A => B C D
802
+ After Conversion:
803
+ A => B A@$@B
804
+ A@$@B => C D
805
+ """
806
+ result = []
807
+
808
+ for rule in grammar.productions():
809
+ if len(rule.rhs()) > 2:
810
+ # this rule needs to be broken down
811
+ left_side = rule.lhs()
812
+ for k in range(0, len(rule.rhs()) - 2):
813
+ tsym = rule.rhs()[k]
814
+ new_sym = Nonterminal(left_side.symbol() + padding + tsym.symbol())
815
+ new_production = Production(left_side, (tsym, new_sym))
816
+ left_side = new_sym
817
+ result.append(new_production)
818
+ last_prd = Production(left_side, rule.rhs()[-2:])
819
+ result.append(last_prd)
820
+ else:
821
+ result.append(rule)
822
+
823
+ n_grammar = CFG(grammar.start(), result)
824
+ return n_grammar
825
+
826
+ @classmethod
827
+ def eliminate_start(cls, grammar):
828
+ """
829
+ Eliminate start rule in case it appears on RHS
830
+ Example: S -> S0 S1 and S0 -> S1 S
831
+ Then another rule S0_SIGMA -> S is added
832
+ """
833
+ start = grammar.start()
834
+ result = []
835
+ need_to_add = None
836
+ for rule in grammar.productions():
837
+ if start in rule.rhs():
838
+ need_to_add = True
839
+ result.append(rule)
840
+ if need_to_add:
841
+ start = Nonterminal("S0_SIGMA")
842
+ result.append(Production(start, [grammar.start()]))
843
+ n_grammar = CFG(start, result)
844
+ return n_grammar
845
+ return grammar
846
+
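For instance, when the start symbol occurs on a right-hand side (toy grammar for illustration):

    from nltk import CFG

    g = CFG.fromstring("""
        S -> A S
        S -> 'x'
        A -> 'a'
    """)
    g2 = CFG.eliminate_start(g)
    print(g2.start())         # S0_SIGMA
    print(g2.productions())   # the original rules plus S0_SIGMA -> S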
847
+ def __repr__(self):
848
+ return "<Grammar with %d productions>" % len(self._productions)
849
+
850
+ def __str__(self):
851
+ result = "Grammar with %d productions" % len(self._productions)
852
+ result += " (start state = %r)" % self._start
853
+ for production in self._productions:
854
+ result += "\n %s" % production
855
+ return result
856
+
857
+
858
+ class FeatureGrammar(CFG):
859
+ """
860
+ A feature-based grammar. This is equivalent to a
861
+ ``CFG`` whose nonterminals are all
862
+ ``FeatStructNonterminal``.
863
+
864
+ A grammar consists of a start state and a set of
865
+ productions. The set of terminals and nonterminals
866
+ is implicitly specified by the productions.
867
+ """
868
+
869
+ def __init__(self, start, productions):
870
+ """
871
+ Create a new feature-based grammar, from the given start
872
+ state and set of ``Productions``.
873
+
874
+ :param start: The start symbol
875
+ :type start: FeatStructNonterminal
876
+ :param productions: The list of productions that defines the grammar
877
+ :type productions: list(Production)
878
+ """
879
+ CFG.__init__(self, start, productions)
880
+
881
+ # The difference with CFG is that the productions are
882
+ # indexed on the TYPE feature of the nonterminals.
883
+ # This is calculated by the method _get_type_if_possible().
884
+
885
+ def _calculate_indexes(self):
886
+ self._lhs_index = {}
887
+ self._rhs_index = {}
888
+ self._empty_index = {}
889
+ self._empty_productions = []
890
+ self._lexical_index = {}
891
+ for prod in self._productions:
892
+ # Left hand side.
893
+ lhs = self._get_type_if_possible(prod._lhs)
894
+ if lhs not in self._lhs_index:
895
+ self._lhs_index[lhs] = []
896
+ self._lhs_index[lhs].append(prod)
897
+ if prod._rhs:
898
+ # First item in right hand side.
899
+ rhs0 = self._get_type_if_possible(prod._rhs[0])
900
+ if rhs0 not in self._rhs_index:
901
+ self._rhs_index[rhs0] = []
902
+ self._rhs_index[rhs0].append(prod)
903
+ else:
904
+ # The right hand side is empty.
905
+ if lhs not in self._empty_index:
906
+ self._empty_index[lhs] = []
907
+ self._empty_index[lhs].append(prod)
908
+ self._empty_productions.append(prod)
909
+ # Lexical tokens in the right hand side.
910
+ for token in prod._rhs:
911
+ if is_terminal(token):
912
+ self._lexical_index.setdefault(token, set()).add(prod)
913
+
914
+ @classmethod
915
+ def fromstring(
916
+ cls, input, features=None, logic_parser=None, fstruct_reader=None, encoding=None
917
+ ):
918
+ """
919
+ Return a feature structure based grammar.
920
+
921
+ :param input: a grammar, either in the form of a string or else
922
+ as a list of strings.
923
+ :param features: a tuple of features (default: SLASH, TYPE)
924
+ :param logic_parser: a parser for lambda-expressions,
925
+ by default, ``LogicParser()``
926
+ :param fstruct_reader: a feature structure parser
927
+ (only used if features and logic_parser are None)
928
+ """
929
+ if features is None:
930
+ features = (SLASH, TYPE)
931
+
932
+ if fstruct_reader is None:
933
+ fstruct_reader = FeatStructReader(
934
+ features, FeatStructNonterminal, logic_parser=logic_parser
935
+ )
936
+ elif logic_parser is not None:
937
+ raise Exception(
938
+ "'logic_parser' and 'fstruct_reader' must " "not both be set"
939
+ )
940
+
941
+ start, productions = read_grammar(
942
+ input, fstruct_reader.read_partial, encoding=encoding
943
+ )
944
+ return cls(start, productions)
945
+
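A small sketch of building a feature grammar from a string (the agreement grammar below is invented for illustration):

    from nltk.grammar import FeatureGrammar

    fg = FeatureGrammar.fromstring("""
        S -> NP[NUM=?n] VP[NUM=?n]
        NP[NUM=sg] -> 'this'
        NP[NUM=pl] -> 'these'
        VP[NUM=sg] -> 'works'
        VP[NUM=pl] -> 'work'
    """)
    print(fg.start())              # S[]
    print(len(fg.productions()))   # 5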
946
+ def productions(self, lhs=None, rhs=None, empty=False):
947
+ """
948
+ Return the grammar productions, filtered by the left-hand side
949
+ or the first item in the right-hand side.
950
+
951
+ :param lhs: Only return productions with the given left-hand side.
952
+ :param rhs: Only return productions with the given first item
953
+ in the right-hand side.
954
+ :param empty: Only return productions with an empty right-hand side.
955
+ :rtype: list(Production)
956
+ """
957
+ if rhs and empty:
958
+ raise ValueError(
959
+ "You cannot select empty and non-empty " "productions at the same time."
960
+ )
961
+
962
+ # no constraints so return everything
963
+ if not lhs and not rhs:
964
+ if empty:
965
+ return self._empty_productions
966
+ else:
967
+ return self._productions
968
+
969
+ # only lhs specified so look up its index
970
+ elif lhs and not rhs:
971
+ if empty:
972
+ return self._empty_index.get(self._get_type_if_possible(lhs), [])
973
+ else:
974
+ return self._lhs_index.get(self._get_type_if_possible(lhs), [])
975
+
976
+ # only rhs specified so look up its index
977
+ elif rhs and not lhs:
978
+ return self._rhs_index.get(self._get_type_if_possible(rhs), [])
979
+
980
+ # intersect
981
+ else:
982
+ return [
983
+ prod
984
+ for prod in self._lhs_index.get(self._get_type_if_possible(lhs), [])
985
+ if prod in self._rhs_index.get(self._get_type_if_possible(rhs), [])
986
+ ]
987
+
988
+ def leftcorners(self, cat):
989
+ """
990
+ Return the set of all words that the given category can start with.
991
+ Also called the "first set" in compiler construction.
992
+ """
993
+ raise NotImplementedError("Not implemented yet")
994
+
995
+ def leftcorner_parents(self, cat):
996
+ """
997
+ Return the set of all categories for which the given category
998
+ is a left corner.
999
+ """
1000
+ raise NotImplementedError("Not implemented yet")
1001
+
1002
+ def _get_type_if_possible(self, item):
1003
+ """
1004
+ Helper function which returns the ``TYPE`` feature of the ``item``,
1005
+ if it exists, otherwise it returns the ``item`` itself
1006
+ """
1007
+ if isinstance(item, dict) and TYPE in item:
1008
+ return FeatureValueType(item[TYPE])
1009
+ else:
1010
+ return item
1011
+
1012
+
1013
+ @total_ordering
1014
+ class FeatureValueType:
1015
+ """
1016
+ A helper class for ``FeatureGrammars``, designed to be different
1017
+ from ordinary strings. This is to stop the ``FeatStruct``
1018
+ ``FOO[]`` from comparing equal to the terminal "FOO".
1019
+ """
1020
+
1021
+ def __init__(self, value):
1022
+ self._value = value
1023
+
1024
+ def __repr__(self):
1025
+ return "<%s>" % self._value
1026
+
1027
+ def __eq__(self, other):
1028
+ return type(self) == type(other) and self._value == other._value
1029
+
1030
+ def __ne__(self, other):
1031
+ return not self == other
1032
+
1033
+ def __lt__(self, other):
1034
+ if not isinstance(other, FeatureValueType):
1035
+ raise_unorderable_types("<", self, other)
1036
+ return self._value < other._value
1037
+
1038
+ def __hash__(self):
1039
+ return hash(self._value)
1040
+
1041
+
1042
+ class DependencyGrammar:
1043
+ """
1044
+ A dependency grammar. A DependencyGrammar consists of a set of
1045
+ productions. Each production specifies a head/modifier relationship
1046
+ between a pair of words.
1047
+ """
1048
+
1049
+ def __init__(self, productions):
1050
+ """
1051
+ Create a new dependency grammar, from the set of ``Productions``.
1052
+
1053
+ :param productions: The list of productions that defines the grammar
1054
+ :type productions: list(Production)
1055
+ """
1056
+ self._productions = productions
1057
+
1058
+ @classmethod
1059
+ def fromstring(cls, input):
1060
+ productions = []
1061
+ for linenum, line in enumerate(input.split("\n")):
1062
+ line = line.strip()
1063
+ if line.startswith("#") or line == "":
1064
+ continue
1065
+ try:
1066
+ productions += _read_dependency_production(line)
1067
+ except ValueError as e:
1068
+ raise ValueError(f"Unable to parse line {linenum}: {line}") from e
1069
+ if len(productions) == 0:
1070
+ raise ValueError("No productions found!")
1071
+ return cls(productions)
1072
+
1073
+ def contains(self, head, mod):
1074
+ """
1075
+ :param head: A head word.
1076
+ :type head: str
1077
+ :param mod: A mod word, to test as a modifier of 'head'.
1078
+ :type mod: str
1079
+
1080
+ :return: true if this ``DependencyGrammar`` contains a
1081
+ ``DependencyProduction`` mapping 'head' to 'mod'.
1082
+ :rtype: bool
1083
+ """
1084
+ for production in self._productions:
1085
+ for possibleMod in production._rhs:
1086
+ if production._lhs == head and possibleMod == mod:
1087
+ return True
1088
+ return False
1089
+
1090
+ def __contains__(self, head_mod):
1091
+ """
1092
+ Return True if this ``DependencyGrammar`` contains a
1093
+ ``DependencyProduction`` mapping 'head' to 'mod'.
1094
+
1095
+ :param head_mod: A tuple of a head word and a mod word,
1096
+ to test as a modifier of 'head'.
1097
+ :type head_mod: Tuple[str, str]
1098
+ :rtype: bool
1099
+ """
1100
+ try:
1101
+ head, mod = head_mod
1102
+ except ValueError as e:
1103
+ raise ValueError(
1104
+ "Must use a tuple of strings, e.g. `('price', 'of') in grammar`"
1105
+ ) from e
1106
+ return self.contains(head, mod)
1107
+
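For example, membership testing with a tuple (toy grammar for illustration):

    from nltk.grammar import DependencyGrammar

    dg = DependencyGrammar.fromstring("""
        'taught' -> 'play' | 'man'
        'man' -> 'the'
    """)
    print(("taught", "man") in dg)   # True
    print(("man", "play") in dg)     # False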
1108
+ # # should be rewritten, the set comp won't work in all comparisons
1109
+ # def contains_exactly(self, head, modlist):
1110
+ # for production in self._productions:
1111
+ # if(len(production._rhs) == len(modlist)):
1112
+ # if(production._lhs == head):
1113
+ # set1 = Set(production._rhs)
1114
+ # set2 = Set(modlist)
1115
+ # if(set1 == set2):
1116
+ # return True
1117
+ # return False
1118
+
1119
+ def __str__(self):
1120
+ """
1121
+ Return a verbose string representation of the ``DependencyGrammar``
1122
+
1123
+ :rtype: str
1124
+ """
1125
+ str = "Dependency grammar with %d productions" % len(self._productions)
1126
+ for production in self._productions:
1127
+ str += "\n %s" % production
1128
+ return str
1129
+
1130
+ def __repr__(self):
1131
+ """
1132
+ Return a concise string representation of the ``DependencyGrammar``
1133
+ """
1134
+ return "Dependency grammar with %d productions" % len(self._productions)
1135
+
1136
+
1137
+ class ProbabilisticDependencyGrammar:
1138
+ """ """
1139
+
1140
+ def __init__(self, productions, events, tags):
1141
+ self._productions = productions
1142
+ self._events = events
1143
+ self._tags = tags
1144
+
1145
+ def contains(self, head, mod):
1146
+ """
1147
+ Return True if this ``DependencyGrammar`` contains a
1148
+ ``DependencyProduction`` mapping 'head' to 'mod'.
1149
+
1150
+ :param head: A head word.
1151
+ :type head: str
1152
+ :param mod: A mod word, to test as a modifier of 'head'.
1153
+ :type mod: str
1154
+ :rtype: bool
1155
+ """
1156
+ for production in self._productions:
1157
+ for possibleMod in production._rhs:
1158
+ if production._lhs == head and possibleMod == mod:
1159
+ return True
1160
+ return False
1161
+
1162
+ def __str__(self):
1163
+ """
1164
+ Return a verbose string representation of the ``ProbabilisticDependencyGrammar``
1165
+
1166
+ :rtype: str
1167
+ """
1168
+ str = "Statistical dependency grammar with %d productions" % len(
1169
+ self._productions
1170
+ )
1171
+ for production in self._productions:
1172
+ str += "\n %s" % production
1173
+ str += "\nEvents:"
1174
+ for event in self._events:
1175
+ str += "\n %d:%s" % (self._events[event], event)
1176
+ str += "\nTags:"
1177
+ for tag_word in self._tags:
1178
+ str += f"\n {tag_word}:\t({self._tags[tag_word]})"
1179
+ return str
1180
+
1181
+ def __repr__(self):
1182
+ """
1183
+ Return a concise string representation of the ``ProbabilisticDependencyGrammar``
1184
+ """
1185
+ return "Statistical Dependency grammar with %d productions" % len(
1186
+ self._productions
1187
+ )
1188
+
1189
+
1190
+ class PCFG(CFG):
1191
+ """
1192
+ A probabilistic context-free grammar. A PCFG consists of a
1193
+ start state and a set of productions with probabilities. The set of
1194
+ terminals and nonterminals is implicitly specified by the productions.
1195
+
1196
+ PCFG productions use the ``ProbabilisticProduction`` class.
1197
+ ``PCFGs`` impose the constraint that the set of productions with
1198
+ any given left-hand-side must have probabilities that sum to 1
1199
+ (allowing for a small margin of error).
1200
+
1201
+ If you need efficient key-based access to productions, you can use
1202
+ a subclass to implement it.
1203
+
1204
+ :type EPSILON: float
1205
+ :cvar EPSILON: The acceptable margin of error for checking that
1206
+ productions with a given left-hand side have probabilities
1207
+ that sum to 1.
1208
+ """
1209
+
1210
+ EPSILON = 0.01
1211
+
1212
+ def __init__(self, start, productions, calculate_leftcorners=True):
1213
+ """
1214
+ Create a new context-free grammar, from the given start state
1215
+ and set of ``ProbabilisticProductions``.
1216
+
1217
+ :param start: The start symbol
1218
+ :type start: Nonterminal
1219
+ :param productions: The list of productions that defines the grammar
1220
+ :type productions: list(Production)
1221
+ :raise ValueError: if the set of productions with any left-hand-side
1222
+ do not have probabilities that sum to a value within
1223
+ EPSILON of 1.
1224
+ :param calculate_leftcorners: False if we don't want to calculate the
1225
+ leftcorner relation. In that case, some optimized chart parsers won't work.
1226
+ :type calculate_leftcorners: bool
1227
+ """
1228
+ CFG.__init__(self, start, productions, calculate_leftcorners)
1229
+
1230
+ # Make sure that the probabilities sum to one.
1231
+ probs = {}
1232
+ for production in productions:
1233
+ probs[production.lhs()] = probs.get(production.lhs(), 0) + production.prob()
1234
+ for (lhs, p) in probs.items():
1235
+ if not ((1 - PCFG.EPSILON) < p < (1 + PCFG.EPSILON)):
1236
+ raise ValueError("Productions for %r do not sum to 1" % lhs)
1237
+
1238
+ @classmethod
1239
+ def fromstring(cls, input, encoding=None):
1240
+ """
1241
+ Return a probabilistic context-free grammar corresponding to the
1242
+ input string(s).
1243
+
1244
+ :param input: a grammar, either in the form of a string or else
1245
+ as a list of strings.
1246
+ """
1247
+ start, productions = read_grammar(
1248
+ input, standard_nonterm_parser, probabilistic=True, encoding=encoding
1249
+ )
1250
+ return cls(start, productions)
1251
+
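A minimal sketch of the probability constraint in action:

    from nltk import PCFG

    PCFG.fromstring("S -> 'a' [0.4] | 'b' [0.6]")    # OK: probabilities sum to 1
    try:
        PCFG.fromstring("S -> 'a' [0.4] | 'b' [0.4]")
    except ValueError as err:
        print(err)   # Productions for S do not sum to 1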
1252
+
1253
+ #################################################################
1254
+ # Inducing Grammars
1255
+ #################################################################
1256
+
1257
+ # Contributed by Nathan Bodenstab <[email protected]>
1258
+
1259
+
1260
+ def induce_pcfg(start, productions):
1261
+ r"""
1262
+ Induce a PCFG grammar from a list of productions.
1263
+
1264
+ The probability of a production A -> B C in a PCFG is:
1265
+
1266
+ | count(A -> B C)
1267
+ | P(B, C | A) = --------------- where \* is any right hand side
1268
+ | count(A -> \*)
1269
+
1270
+ :param start: The start symbol
1271
+ :type start: Nonterminal
1272
+ :param productions: The list of productions that defines the grammar
1273
+ :type productions: list(Production)
1274
+ """
1275
+ # Production count: the number of times a given production occurs
1276
+ pcount = {}
1277
+
1278
+ # LHS-count: counts the number of times a given lhs occurs
1279
+ lcount = {}
1280
+
1281
+ for prod in productions:
1282
+ lcount[prod.lhs()] = lcount.get(prod.lhs(), 0) + 1
1283
+ pcount[prod] = pcount.get(prod, 0) + 1
1284
+
1285
+ prods = [
1286
+ ProbabilisticProduction(p.lhs(), p.rhs(), prob=pcount[p] / lcount[p.lhs()])
1287
+ for p in pcount
1288
+ ]
1289
+ return PCFG(start, prods)
1290
+
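A tiny worked example of the counting (the productions are invented for illustration):

    from nltk import Nonterminal, Production, induce_pcfg

    S, NP, VP = Nonterminal("S"), Nonterminal("NP"), Nonterminal("VP")
    productions = [
        Production(NP, ["John"]),                                 # seen twice
        Production(NP, ["John"]),
        Production(NP, [Nonterminal("Det"), Nonterminal("N")]),   # seen once
        Production(S, [NP, VP]),
    ]
    grammar = induce_pcfg(S, productions)
    print(grammar.productions())
    # NP -> 'John' gets probability 2/3, NP -> Det N gets 1/3, S -> NP VP gets 1.0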
1291
+
1292
+ #################################################################
1293
+ # Helper functions for reading productions
1294
+ #################################################################
1295
+
1296
+
1297
+ def _read_cfg_production(input):
1298
+ """
1299
+ Return a list of context-free ``Productions``.
1300
+ """
1301
+ return _read_production(input, standard_nonterm_parser)
1302
+
1303
+
1304
+ def _read_pcfg_production(input):
1305
+ """
1306
+ Return a list of PCFG ``ProbabilisticProductions``.
1307
+ """
1308
+ return _read_production(input, standard_nonterm_parser, probabilistic=True)
1309
+
1310
+
1311
+ def _read_fcfg_production(input, fstruct_reader):
1312
+ """
1313
+ Return a list of feature-based ``Productions``.
1314
+ """
1315
+ return _read_production(input, fstruct_reader)
1316
+
1317
+
1318
+ # Parsing generic grammars
1319
+
1320
+ _ARROW_RE = re.compile(r"\s* -> \s*", re.VERBOSE)
1321
+ _PROBABILITY_RE = re.compile(r"( \[ [\d\.]+ \] ) \s*", re.VERBOSE)
1322
+ _TERMINAL_RE = re.compile(r'( "[^"]*" | \'[^\']*\' ) \s*', re.VERBOSE)
1323
+ _DISJUNCTION_RE = re.compile(r"\| \s*", re.VERBOSE)
1324
+
1325
+
1326
+ def _read_production(line, nonterm_parser, probabilistic=False):
1327
+ """
1328
+ Parse a grammar rule, given as a string, and return
1329
+ a list of productions.
1330
+ """
1331
+ pos = 0
1332
+
1333
+ # Parse the left-hand side.
1334
+ lhs, pos = nonterm_parser(line, pos)
1335
+
1336
+ # Skip over the arrow.
1337
+ m = _ARROW_RE.match(line, pos)
1338
+ if not m:
1339
+ raise ValueError("Expected an arrow")
1340
+ pos = m.end()
1341
+
1342
+ # Parse the right hand side.
1343
+ probabilities = [0.0]
1344
+ rhsides = [[]]
1345
+ while pos < len(line):
1346
+ # Probability.
1347
+ m = _PROBABILITY_RE.match(line, pos)
1348
+ if probabilistic and m:
1349
+ pos = m.end()
1350
+ probabilities[-1] = float(m.group(1)[1:-1])
1351
+ if probabilities[-1] > 1.0:
1352
+ raise ValueError(
1353
+ "Production probability %f, "
1354
+ "should not be greater than 1.0" % (probabilities[-1],)
1355
+ )
1356
+
1357
+ # String -- add terminal.
1358
+ elif line[pos] in "'\"":
1359
+ m = _TERMINAL_RE.match(line, pos)
1360
+ if not m:
1361
+ raise ValueError("Unterminated string")
1362
+ rhsides[-1].append(m.group(1)[1:-1])
1363
+ pos = m.end()
1364
+
1365
+ # Vertical bar -- start new rhside.
1366
+ elif line[pos] == "|":
1367
+ m = _DISJUNCTION_RE.match(line, pos)
1368
+ probabilities.append(0.0)
1369
+ rhsides.append([])
1370
+ pos = m.end()
1371
+
1372
+ # Anything else -- nonterminal.
1373
+ else:
1374
+ nonterm, pos = nonterm_parser(line, pos)
1375
+ rhsides[-1].append(nonterm)
1376
+
1377
+ if probabilistic:
1378
+ return [
1379
+ ProbabilisticProduction(lhs, rhs, prob=probability)
1380
+ for (rhs, probability) in zip(rhsides, probabilities)
1381
+ ]
1382
+ else:
1383
+ return [Production(lhs, rhs) for rhs in rhsides]
1384
+
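The disjunction expansion can be seen by calling the (private) helper directly:

    from nltk.grammar import _read_production, standard_nonterm_parser

    prods = _read_production("NP -> Det N | 'John'", standard_nonterm_parser)
    print(prods)   # [NP -> Det N, NP -> 'John']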
1385
+
1386
+ #################################################################
1387
+ # Reading Phrase Structure Grammars
1388
+ #################################################################
1389
+
1390
+
1391
+ def read_grammar(input, nonterm_parser, probabilistic=False, encoding=None):
1392
+ """
1393
+ Return a pair consisting of a starting category and a list of
1394
+ ``Productions``.
1395
+
1396
+ :param input: a grammar, either in the form of a string or else
1397
+ as a list of strings.
1398
+ :param nonterm_parser: a function for parsing nonterminals.
1399
+ It should take a ``(string, position)`` as argument and
1400
+ return a ``(nonterminal, position)`` as result.
1401
+ :param probabilistic: are the grammar rules probabilistic?
1402
+ :type probabilistic: bool
1403
+ :param encoding: the encoding of the grammar, if it is a binary string
1404
+ :type encoding: str
1405
+ """
1406
+ if encoding is not None:
1407
+ input = input.decode(encoding)
1408
+ if isinstance(input, str):
1409
+ lines = input.split("\n")
1410
+ else:
1411
+ lines = input
1412
+
1413
+ start = None
1414
+ productions = []
1415
+ continue_line = ""
1416
+ for linenum, line in enumerate(lines):
1417
+ line = continue_line + line.strip()
1418
+ if line.startswith("#") or line == "":
1419
+ continue
1420
+ if line.endswith("\\"):
1421
+ continue_line = line[:-1].rstrip() + " "
1422
+ continue
1423
+ continue_line = ""
1424
+ try:
1425
+ if line[0] == "%":
1426
+ directive, args = line[1:].split(None, 1)
1427
+ if directive == "start":
1428
+ start, pos = nonterm_parser(args, 0)
1429
+ if pos != len(args):
1430
+ raise ValueError("Bad argument to start directive")
1431
+ else:
1432
+ raise ValueError("Bad directive")
1433
+ else:
1434
+ # expand out the disjunctions on the RHS
1435
+ productions += _read_production(line, nonterm_parser, probabilistic)
1436
+ except ValueError as e:
1437
+ raise ValueError(f"Unable to parse line {linenum + 1}: {line}\n{e}") from e
1438
+
1439
+ if not productions:
1440
+ raise ValueError("No productions found!")
1441
+ if not start:
1442
+ start = productions[0].lhs()
1443
+ return (start, productions)
1444
+
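A short sketch exercising the ``%start`` directive, comment skipping, and backslash continuations (grammar invented for illustration):

    from nltk.grammar import read_grammar, standard_nonterm_parser

    start, prods = read_grammar("""
        %start VP
        # comment lines and blank lines are skipped
        VP -> V NP | \\
              V NP PP
        V -> 'saw'
    """, standard_nonterm_parser)
    print(start)        # VP
    print(len(prods))   # 3: two VP expansions plus V -> 'saw'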
1445
+
1446
+ _STANDARD_NONTERM_RE = re.compile(r"( [\w/][\w/^<>-]* ) \s*", re.VERBOSE)
1447
+
1448
+
1449
+ def standard_nonterm_parser(string, pos):
1450
+ m = _STANDARD_NONTERM_RE.match(string, pos)
1451
+ if not m:
1452
+ raise ValueError("Expected a nonterminal, found: " + string[pos:])
1453
+ return (Nonterminal(m.group(1)), m.end())
1454
+
1455
+
1456
+ #################################################################
1457
+ # Reading Dependency Grammars
1458
+ #################################################################
1459
+
1460
+ _READ_DG_RE = re.compile(
1461
+ r"""^\s* # leading whitespace
1462
+ ('[^']+')\s* # single-quoted lhs
1463
+ (?:[-=]+>)\s* # arrow
1464
+ (?:( # rhs:
1465
+ "[^"]+" # doubled-quoted terminal
1466
+ | '[^']+' # single-quoted terminal
1467
+ | \| # disjunction
1468
+ )
1469
+ \s*) # trailing space
1470
+ *$""", # zero or more copies
1471
+ re.VERBOSE,
1472
+ )
1473
+ _SPLIT_DG_RE = re.compile(r"""('[^']'|[-=]+>|"[^"]+"|'[^']+'|\|)""")
1474
+
1475
+
1476
+ def _read_dependency_production(s):
1477
+ if not _READ_DG_RE.match(s):
1478
+ raise ValueError("Bad production string")
1479
+ pieces = _SPLIT_DG_RE.split(s)
1480
+ pieces = [p for i, p in enumerate(pieces) if i % 2 == 1]
1481
+ lhside = pieces[0].strip("'\"")
1482
+ rhsides = [[]]
1483
+ for piece in pieces[2:]:
1484
+ if piece == "|":
1485
+ rhsides.append([])
1486
+ else:
1487
+ rhsides[-1].append(piece.strip("'\""))
1488
+ return [DependencyProduction(lhside, rhside) for rhside in rhsides]
1489
+
1490
+
1491
+ #################################################################
1492
+ # Demonstration
1493
+ #################################################################
1494
+
1495
+
1496
+ def cfg_demo():
1497
+ """
1498
+ A demonstration showing how ``CFGs`` can be created and used.
1499
+ """
1500
+
1501
+ from nltk import CFG, Production, nonterminals
1502
+
1503
+ # Create some nonterminals
1504
+ S, NP, VP, PP = nonterminals("S, NP, VP, PP")
1505
+ N, V, P, Det = nonterminals("N, V, P, Det")
1506
+ VP_slash_NP = VP / NP
1507
+
1508
+ print("Some nonterminals:", [S, NP, VP, PP, N, V, P, Det, VP / NP])
1509
+ print(" S.symbol() =>", repr(S.symbol()))
1510
+ print()
1511
+
1512
+ print(Production(S, [NP]))
1513
+
1514
+ # Create some Grammar Productions
1515
+ grammar = CFG.fromstring(
1516
+ """
1517
+ S -> NP VP
1518
+ PP -> P NP
1519
+ NP -> Det N | NP PP
1520
+ VP -> V NP | VP PP
1521
+ Det -> 'a' | 'the'
1522
+ N -> 'dog' | 'cat'
1523
+ V -> 'chased' | 'sat'
1524
+ P -> 'on' | 'in'
1525
+ """
1526
+ )
1527
+
1528
+ print("A Grammar:", repr(grammar))
1529
+ print(" grammar.start() =>", repr(grammar.start()))
1530
+ print(" grammar.productions() =>", end=" ")
1531
+ # Use string.replace(...) to line-wrap the output.
1532
+ print(repr(grammar.productions()).replace(",", ",\n" + " " * 25))
1533
+ print()
1534
+
1535
+
1536
+ def pcfg_demo():
1537
+ """
1538
+ A demonstration showing how a ``PCFG`` can be created and used.
1539
+ """
1540
+
1541
+ from nltk import induce_pcfg, treetransforms
1542
+ from nltk.corpus import treebank
1543
+ from nltk.parse import pchart
1544
+
1545
+ toy_pcfg1 = PCFG.fromstring(
1546
+ """
1547
+ S -> NP VP [1.0]
1548
+ NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
1549
+ Det -> 'the' [0.8] | 'my' [0.2]
1550
+ N -> 'man' [0.5] | 'telescope' [0.5]
1551
+ VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
1552
+ V -> 'ate' [0.35] | 'saw' [0.65]
1553
+ PP -> P NP [1.0]
1554
+ P -> 'with' [0.61] | 'under' [0.39]
1555
+ """
1556
+ )
1557
+
1558
+ toy_pcfg2 = PCFG.fromstring(
1559
+ """
1560
+ S -> NP VP [1.0]
1561
+ VP -> V NP [.59]
1562
+ VP -> V [.40]
1563
+ VP -> VP PP [.01]
1564
+ NP -> Det N [.41]
1565
+ NP -> Name [.28]
1566
+ NP -> NP PP [.31]
1567
+ PP -> P NP [1.0]
1568
+ V -> 'saw' [.21]
1569
+ V -> 'ate' [.51]
1570
+ V -> 'ran' [.28]
1571
+ N -> 'boy' [.11]
1572
+ N -> 'cookie' [.12]
1573
+ N -> 'table' [.13]
1574
+ N -> 'telescope' [.14]
1575
+ N -> 'hill' [.5]
1576
+ Name -> 'Jack' [.52]
1577
+ Name -> 'Bob' [.48]
1578
+ P -> 'with' [.61]
1579
+ P -> 'under' [.39]
1580
+ Det -> 'the' [.41]
1581
+ Det -> 'a' [.31]
1582
+ Det -> 'my' [.28]
1583
+ """
1584
+ )
1585
+
1586
+ pcfg_prods = toy_pcfg1.productions()
1587
+
1588
+ pcfg_prod = pcfg_prods[2]
1589
+ print("A PCFG production:", repr(pcfg_prod))
1590
+ print(" pcfg_prod.lhs() =>", repr(pcfg_prod.lhs()))
1591
+ print(" pcfg_prod.rhs() =>", repr(pcfg_prod.rhs()))
1592
+ print(" pcfg_prod.prob() =>", repr(pcfg_prod.prob()))
1593
+ print()
1594
+
1595
+ grammar = toy_pcfg2
1596
+ print("A PCFG grammar:", repr(grammar))
1597
+ print(" grammar.start() =>", repr(grammar.start()))
1598
+ print(" grammar.productions() =>", end=" ")
1599
+ # Use .replace(...) to line-wrap the output.
1600
+ print(repr(grammar.productions()).replace(",", ",\n" + " " * 26))
1601
+ print()
1602
+
1603
+ # extract productions from three trees and induce the PCFG
1604
+ print("Induce PCFG grammar from treebank data:")
1605
+
1606
+ productions = []
1607
+ item = treebank._fileids[0]
1608
+ for tree in treebank.parsed_sents(item)[:3]:
1609
+ # perform optional tree transformations, e.g.:
1610
+ tree.collapse_unary(collapsePOS=False)
1611
+ tree.chomsky_normal_form(horzMarkov=2)
1612
+
1613
+ productions += tree.productions()
1614
+
1615
+ S = Nonterminal("S")
1616
+ grammar = induce_pcfg(S, productions)
1617
+ print(grammar)
1618
+ print()
1619
+
1620
+ print("Parse sentence using induced grammar:")
1621
+
1622
+ parser = pchart.InsideChartParser(grammar)
1623
+ parser.trace(3)
1624
+
1625
+ # doesn't work as tokens are different:
1626
+ # sent = treebank.tokenized('wsj_0001.mrg')[0]
1627
+
1628
+ sent = treebank.parsed_sents(item)[0].leaves()
1629
+ print(sent)
1630
+ for parse in parser.parse(sent):
1631
+ print(parse)
1632
+
1633
+
1634
+ def fcfg_demo():
1635
+ import nltk.data
1636
+
1637
+ g = nltk.data.load("grammars/book_grammars/feat0.fcfg")
1638
+ print(g)
1639
+ print()
1640
+
1641
+
1642
+ def dg_demo():
1643
+ """
1644
+ A demonstration showing the creation and inspection of a
1645
+ ``DependencyGrammar``.
1646
+ """
1647
+ grammar = DependencyGrammar.fromstring(
1648
+ """
1649
+ 'scratch' -> 'cats' | 'walls'
1650
+ 'walls' -> 'the'
1651
+ 'cats' -> 'the'
1652
+ """
1653
+ )
1654
+ print(grammar)
1655
+
1656
+
1657
+ def sdg_demo():
1658
+ """
1659
+ A demonstration of how to read a string representation of
1660
+ a CoNLL format dependency tree.
1661
+ """
1662
+ from nltk.parse import DependencyGraph
1663
+
1664
+ dg = DependencyGraph(
1665
+ """
1666
+ 1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
1667
+ 2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
1668
+ 3 met met Prep Prep voor 8 mod _ _
1669
+ 4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
1670
+ 5 moeder moeder N N soort|ev|neut 3 obj1 _ _
1671
+ 6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
1672
+ 7 gaan ga V V hulp|inf 6 vc _ _
1673
+ 8 winkelen winkel V V intrans|inf 11 cnj _ _
1674
+ 9 , , Punc Punc komma 8 punct _ _
1675
+ 10 zwemmen zwem V V intrans|inf 11 cnj _ _
1676
+ 11 of of Conj Conj neven 7 vc _ _
1677
+ 12 terrassen terras N N soort|mv|neut 11 cnj _ _
1678
+ 13 . . Punc Punc punt 12 punct _ _
1679
+ """
1680
+ )
1681
+ tree = dg.tree()
1682
+ tree.pprint()  # pprint() prints directly and returns None
1683
+
1684
+
1685
+ def demo():
1686
+ cfg_demo()
1687
+ pcfg_demo()
1688
+ fcfg_demo()
1689
+ dg_demo()
1690
+ sdg_demo()
1691
+
1692
+
1693
+ if __name__ == "__main__":
1694
+ demo()
1695
+
1696
+ __all__ = [
1697
+ "Nonterminal",
1698
+ "nonterminals",
1699
+ "CFG",
1700
+ "Production",
1701
+ "PCFG",
1702
+ "ProbabilisticProduction",
1703
+ "DependencyGrammar",
1704
+ "DependencyProduction",
1705
+ "ProbabilisticDependencyGrammar",
1706
+ "induce_pcfg",
1707
+ "read_grammar",
1708
+ ]
env-llmeval/lib/python3.10/site-packages/nltk/help.py ADDED
@@ -0,0 +1,64 @@
1
+ # Natural Language Toolkit (NLTK) Help
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Authors: Steven Bird <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Provide structured access to documentation.
10
+ """
11
+
12
+ import re
13
+ from textwrap import wrap
14
+
15
+ from nltk.data import load
16
+
17
+
18
+ def brown_tagset(tagpattern=None):
19
+ _format_tagset("brown_tagset", tagpattern)
20
+
21
+
22
+ def claws5_tagset(tagpattern=None):
23
+ _format_tagset("claws5_tagset", tagpattern)
24
+
25
+
26
+ def upenn_tagset(tagpattern=None):
27
+ _format_tagset("upenn_tagset", tagpattern)
28
+
29
+
30
+ #####################################################################
31
+ # UTILITIES
32
+ #####################################################################
33
+
34
+
35
+ def _print_entries(tags, tagdict):
36
+ for tag in tags:
37
+ entry = tagdict[tag]
38
+ defn = [tag + ": " + entry[0]]
39
+ examples = wrap(
40
+ entry[1], width=75, initial_indent=" ", subsequent_indent=" "
41
+ )
42
+ print("\n".join(defn + examples))
43
+
44
+
45
+ def _format_tagset(tagset, tagpattern=None):
46
+ tagdict = load("help/tagsets/" + tagset + ".pickle")
47
+ if not tagpattern:
48
+ _print_entries(sorted(tagdict), tagdict)
49
+ elif tagpattern in tagdict:
50
+ _print_entries([tagpattern], tagdict)
51
+ else:
52
+ tagpattern = re.compile(tagpattern)
53
+ tags = [tag for tag in sorted(tagdict) if tagpattern.match(tag)]
54
+ if tags:
55
+ _print_entries(tags, tagdict)
56
+ else:
57
+ print("No matching tags found.")
58
+
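Typical usage, assuming the pickled tagset tables have been installed via the downloader:

    import nltk
    nltk.download("tagsets")   # fetch help/tagsets/*.pickle once

    from nltk.help import upenn_tagset, brown_tagset
    upenn_tagset("JJ")      # exact tag: prints the single JJ entry
    upenn_tagset(r"NN.*")   # regex: prints every tag matching NN.*
    brown_tagset()          # no pattern: prints every Brown tag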
59
+
60
+ if __name__ == "__main__":
61
+ brown_tagset(r"NN.*")
62
+ upenn_tagset(r".*\$")
63
+ claws5_tagset("UNDEFINED")
64
+ brown_tagset(r"NN")
env-llmeval/lib/python3.10/site-packages/nltk/internals.py ADDED
@@ -0,0 +1,1123 @@
1
+ # Natural Language Toolkit: Internal utility functions
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # Nitin Madnani <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ import fnmatch
11
+ import locale
12
+ import os
13
+ import re
14
+ import stat
15
+ import subprocess
16
+ import sys
17
+ import textwrap
18
+ import types
19
+ import warnings
20
+ from xml.etree import ElementTree
21
+
22
+ ##########################################################################
23
+ # Java Via Command-Line
24
+ ##########################################################################
25
+
26
+ _java_bin = None
27
+ _java_options = []
28
+ # [xx] add classpath option to config_java?
29
+ def config_java(bin=None, options=None, verbose=False):
30
+ """
31
+ Configure nltk's java interface, by letting nltk know where it can
32
+ find the Java binary, and what extra options (if any) should be
33
+ passed to Java when it is run.
34
+
35
+ :param bin: The full path to the Java binary. If not specified,
36
+ then nltk will search the system for a Java binary; and if
37
+ one is not found, it will raise a ``LookupError`` exception.
38
+ :type bin: str
39
+ :param options: A list of options that should be passed to the
40
+ Java binary when it is called. A common value is
41
+ ``'-Xmx512m'``, which tells Java binary to increase
42
+ the maximum heap size to 512 megabytes. If no options are
43
+ specified, then do not modify the options list.
44
+ :type options: list(str)
45
+ """
46
+ global _java_bin, _java_options
47
+ _java_bin = find_binary(
48
+ "java",
49
+ bin,
50
+ env_vars=["JAVAHOME", "JAVA_HOME"],
51
+ verbose=verbose,
52
+ binary_names=["java.exe"],
53
+ )
54
+
55
+ if options is not None:
56
+ if isinstance(options, str):
57
+ options = options.split()
58
+ _java_options = list(options)
59
+
60
+
61
+ def java(cmd, classpath=None, stdin=None, stdout=None, stderr=None, blocking=True):
62
+ """
63
+ Execute the given java command, by opening a subprocess that calls
64
+ Java. If java has not yet been configured, it will be configured
65
+ by calling ``config_java()`` with no arguments.
66
+
67
+ :param cmd: The java command that should be called, formatted as
68
+ a list of strings. Typically, the first string will be the name
69
+ of the java class; and the remaining strings will be arguments
70
+ for that java class.
71
+ :type cmd: list(str)
72
+
73
+ :param classpath: A ``':'`` separated list of directories, JAR
74
+ archives, and ZIP archives to search for class files.
75
+ :type classpath: str
76
+
77
+ :param stdin: Specify the executed program's
78
+ standard input file handles, respectively. Valid values are ``subprocess.PIPE``,
79
+ an existing file descriptor (a positive integer), an existing
80
+ file object, 'pipe', 'stdout', 'devnull' and None. ``subprocess.PIPE`` indicates that a
81
+ new pipe to the child should be created. With None, no
82
+ redirection will occur; the child's file handles will be
83
+ inherited from the parent. Additionally, stderr can be
84
+ ``subprocess.STDOUT``, which indicates that the stderr data
85
+ from the applications should be captured into the same file
86
+ handle as for stdout.
87
+
88
+ :param stdout: Specify the executed program's standard output file
89
+ handle. See ``stdin`` for valid values.
90
+
91
+ :param stderr: Specify the executed program's standard error file
92
+ handle. See ``stdin`` for valid values.
93
+
94
+
95
+ :param blocking: If ``false``, then return immediately after
96
+ spawning the subprocess. In this case, the return value is
97
+ the ``Popen`` object, and not a ``(stdout, stderr)`` tuple.
98
+
99
+ :return: If ``blocking=True``, then return a tuple ``(stdout,
100
+ stderr)``, containing the stdout and stderr outputs generated
101
+ by the java command if the ``stdout`` and ``stderr`` parameters
102
+ were set to ``subprocess.PIPE``; or None otherwise. If
103
+ ``blocking=False``, then return a ``subprocess.Popen`` object.
104
+
105
+ :raise OSError: If the java command returns a nonzero return code.
106
+ """
107
+
108
+ subprocess_output_dict = {
109
+ "pipe": subprocess.PIPE,
110
+ "stdout": subprocess.STDOUT,
111
+ "devnull": subprocess.DEVNULL,
112
+ }
113
+
114
+ stdin = subprocess_output_dict.get(stdin, stdin)
115
+ stdout = subprocess_output_dict.get(stdout, stdout)
116
+ stderr = subprocess_output_dict.get(stderr, stderr)
117
+
118
+ if isinstance(cmd, str):
119
+ raise TypeError("cmd should be a list of strings")
120
+
121
+ # Make sure we know where a java binary is.
122
+ if _java_bin is None:
123
+ config_java()
124
+
125
+ # Set up the classpath.
126
+ if isinstance(classpath, str):
127
+ classpaths = [classpath]
128
+ else:
129
+ classpaths = list(classpath)
130
+ classpath = os.path.pathsep.join(classpaths)
131
+
132
+ # Construct the full command string.
133
+ cmd = list(cmd)
134
+ cmd = ["-cp", classpath] + cmd
135
+ cmd = [_java_bin] + _java_options + cmd
136
+
137
+ # Call java via a subprocess
138
+ p = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr)
139
+ if not blocking:
140
+ return p
141
+ (stdout, stderr) = p.communicate()
142
+
143
+ # Check the return code.
144
+ if p.returncode != 0:
145
+ print(_decode_stdoutdata(stderr))
146
+ raise OSError("Java command failed : " + str(cmd))
147
+
148
+ return (stdout, stderr)
149
+
150
+
151
+ ######################################################################
152
+ # Parsing
153
+ ######################################################################
154
+
155
+
156
+ class ReadError(ValueError):
157
+ """
158
+ Exception raised by read_* functions when they fail.
159
+ :param position: The index in the input string where an error occurred.
160
+ :param expected: What was expected when an error occurred.
161
+ """
162
+
163
+ def __init__(self, expected, position):
164
+ ValueError.__init__(self, expected, position)
165
+ self.expected = expected
166
+ self.position = position
167
+
168
+ def __str__(self):
169
+ return f"Expected {self.expected} at {self.position}"
170
+
171
+
172
+ _STRING_START_RE = re.compile(r"[uU]?[rR]?(\"\"\"|\'\'\'|\"|\')")
173
+
174
+
175
+ def read_str(s, start_position):
176
+ """
177
+ If a Python string literal begins at the specified position in the
178
+ given string, then return a tuple ``(val, end_position)``
179
+ containing the value of the string literal and the position where
180
+ it ends. Otherwise, raise a ``ReadError``.
181
+
182
+ :param s: A string that will be checked to see if within which a
183
+ Python string literal exists.
184
+ :type s: str
185
+
186
+ :param start_position: The specified beginning position of the string ``s``
187
+ to begin regex matching.
188
+ :type start_position: int
189
+
190
+ :return: A tuple containing the matched string literal evaluated as a
191
+ string and the end position of the string literal.
192
+ :rtype: tuple(str, int)
193
+
194
+ :raise ReadError: If the ``_STRING_START_RE`` regex doesn't return a
195
+ match in ``s`` at ``start_position``, i.e., open quote. If the
196
+ ``_STRING_END_RE`` regex doesn't return a match in ``s`` at the
197
+ end of the first match, i.e., close quote.
198
+ :raise ValueError: If an invalid string (i.e., contains an invalid
199
+ escape sequence) is passed into the ``eval``.
200
+
201
+ :Example:
202
+
203
+ >>> from nltk.internals import read_str
204
+ >>> read_str('"Hello", World!', 0)
205
+ ('Hello', 7)
206
+
207
+ """
208
+ # Read the open quote, and any modifiers.
209
+ m = _STRING_START_RE.match(s, start_position)
210
+ if not m:
211
+ raise ReadError("open quote", start_position)
212
+ quotemark = m.group(1)
213
+
214
+ # Find the close quote.
215
+ _STRING_END_RE = re.compile(r"\\|%s" % quotemark)
216
+ position = m.end()
217
+ while True:
218
+ match = _STRING_END_RE.search(s, position)
219
+ if not match:
220
+ raise ReadError("close quote", position)
221
+ if match.group(0) == "\\":
222
+ position = match.end() + 1
223
+ else:
224
+ break
225
+
226
+ # Process it, using eval. Strings with invalid escape sequences
227
+ # might raise ValueError.
228
+ try:
229
+ return eval(s[start_position : match.end()]), match.end()
230
+ except ValueError as e:
231
+ raise ReadError("valid escape sequence", start_position) from e
232
+
233
+
234
+ _READ_INT_RE = re.compile(r"-?\d+")
235
+
236
+
237
+ def read_int(s, start_position):
238
+ """
239
+ If an integer begins at the specified position in the given
240
+ string, then return a tuple ``(val, end_position)`` containing the
241
+ value of the integer and the position where it ends. Otherwise,
242
+ raise a ``ReadError``.
243
+
244
+ :param s: A string that will be checked to see if within which a
245
+ Python integer exists.
246
+ :type s: str
247
+
248
+ :param start_position: The specified beginning position of the string ``s``
249
+ to begin regex matching.
250
+ :type start_position: int
251
+
252
+ :return: A tuple containing the matched integer casted to an int,
253
+ and the end position of the int in ``s``.
254
+ :rtype: tuple(int, int)
255
+
256
+ :raise ReadError: If the ``_READ_INT_RE`` regex doesn't return a
257
+ match in ``s`` at ``start_position``.
258
+
259
+ :Example:
260
+
261
+ >>> from nltk.internals import read_int
262
+ >>> read_int('42 is the answer', 0)
263
+ (42, 2)
264
+
265
+ """
266
+ m = _READ_INT_RE.match(s, start_position)
267
+ if not m:
268
+ raise ReadError("integer", start_position)
269
+ return int(m.group()), m.end()
270
+
271
+
272
+ _READ_NUMBER_VALUE = re.compile(r"-?(\d*)([.]?\d*)?")
273
+
274
+
275
+ def read_number(s, start_position):
276
+ """
277
+ If an integer or float begins at the specified position in the
278
+ given string, then return a tuple ``(val, end_position)``
279
+ containing the value of the number and the position where it ends.
280
+ Otherwise, raise a ``ReadError``.
281
+
282
+ :param s: A string that will be checked to see if within which a
283
+ Python number exists.
284
+ :type s: str
285
+
286
+ :param start_position: The specified beginning position of the string ``s``
287
+ to begin regex matching.
288
+ :type start_position: int
289
+
290
+ :return: A tuple containing the matched number casted to a ``float``,
291
+ and the end position of the number in ``s``.
292
+ :rtype: tuple(float, int)
293
+
294
+ :raise ReadError: If the ``_READ_NUMBER_VALUE`` regex doesn't return a
295
+ match in ``s`` at ``start_position``.
296
+
297
+ :Example:
298
+
299
+ >>> from nltk.internals import read_number
300
+ >>> read_number('Pi is 3.14159', 6)
301
+ (3.14159, 13)
302
+
303
+ """
304
+ m = _READ_NUMBER_VALUE.match(s, start_position)
305
+ if not m or not (m.group(1) or m.group(2)):
306
+ raise ReadError("number", start_position)
307
+ if m.group(2):
308
+ return float(m.group()), m.end()
309
+ else:
310
+ return int(m.group()), m.end()
311
+
312
+
313
+ ######################################################################
314
+ # Check if a method has been overridden
315
+ ######################################################################
316
+
317
+
318
+ def overridden(method):
319
+ """
320
+ :return: True if ``method`` overrides some method with the same
321
+ name in a base class. This is typically used when defining
322
+ abstract base classes or interfaces, to allow subclasses to define
323
+ either of two related methods:
324
+
325
+ >>> class EaterI:
326
+ ... '''Subclass must define eat() or batch_eat().'''
327
+ ... def eat(self, food):
328
+ ... if overridden(self.batch_eat):
329
+ ... return self.batch_eat([food])[0]
330
+ ... else:
331
+ ... raise NotImplementedError()
332
+ ... def batch_eat(self, foods):
333
+ ... return [self.eat(food) for food in foods]
334
+
335
+ :type method: instance method
336
+ """
337
+ if isinstance(method, types.MethodType) and method.__self__.__class__ is not None:
338
+ name = method.__name__
339
+ funcs = [
340
+ cls.__dict__[name]
341
+ for cls in _mro(method.__self__.__class__)
342
+ if name in cls.__dict__
343
+ ]
344
+ return len(funcs) > 1
345
+ else:
346
+ raise TypeError("Expected an instance method.")
347
+
348
+
349
+ def _mro(cls):
350
+ """
351
+ Return the method resolution order for ``cls`` -- i.e., a list
352
+ containing ``cls`` and all its base classes, in the order in which
353
+ they would be checked by ``getattr``. For new-style classes, this
354
+ is just cls.__mro__. For classic classes, this can be obtained by
355
+ a depth-first left-to-right traversal of ``__bases__``.
356
+ """
357
+ if isinstance(cls, type):
358
+ return cls.__mro__
359
+ else:
360
+ mro = [cls]
361
+ for base in cls.__bases__:
362
+ mro.extend(_mro(base))
363
+ return mro
364
+
365
+
366
+ ######################################################################
367
+ # Deprecation decorator & base class
368
+ ######################################################################
369
+ # [xx] dedent msg first if it comes from a docstring.
370
+
371
+
372
+ def _add_epytext_field(obj, field, message):
373
+ """Add an epytext @field to a given object's docstring."""
374
+ indent = ""
375
+ # If we already have a docstring, then add a blank line to separate
376
+ # it from the new field, and check its indentation.
377
+ if obj.__doc__:
378
+ obj.__doc__ = obj.__doc__.rstrip() + "\n\n"
379
+ indents = re.findall(r"(?<=\n)[ ]+(?!\s)", obj.__doc__.expandtabs())
380
+ if indents:
381
+ indent = min(indents)
382
+ # If we don't have a docstring, add an empty one.
383
+ else:
384
+ obj.__doc__ = ""
385
+
386
+ obj.__doc__ += textwrap.fill(
387
+ f"@{field}: {message}",
388
+ initial_indent=indent,
389
+ subsequent_indent=indent + " ",
390
+ )
391
+
392
+
393
+ def deprecated(message):
394
+ """
395
+ A decorator used to mark functions as deprecated. This will cause
396
+ a warning to be printed the when the function is used. Usage:
397
+
398
+ >>> from nltk.internals import deprecated
399
+ >>> @deprecated('Use foo() instead')
400
+ ... def bar(x):
401
+ ... print(x/10)
402
+
403
+ """
404
+
405
+ def decorator(func):
406
+ msg = f"Function {func.__name__}() has been deprecated. {message}"
407
+ msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ")
408
+
409
+ def newFunc(*args, **kwargs):
410
+ warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
411
+ return func(*args, **kwargs)
412
+
413
+ # Copy the old function's name, docstring, & dict
414
+ newFunc.__dict__.update(func.__dict__)
415
+ newFunc.__name__ = func.__name__
416
+ newFunc.__doc__ = func.__doc__
417
+ newFunc.__deprecated__ = True
418
+ # Add a @deprecated field to the docstring.
419
+ _add_epytext_field(newFunc, "deprecated", message)
420
+ return newFunc
421
+
422
+ return decorator
423
+
424
+
425
+ class Deprecated:
426
+ """
427
+ A base class used to mark deprecated classes. A typical usage is to
428
+ alert users that the name of a class has changed:
429
+
430
+ >>> from nltk.internals import Deprecated
431
+ >>> class NewClassName:
432
+ ... pass # All logic goes here.
433
+ ...
434
+ >>> class OldClassName(Deprecated, NewClassName):
435
+ ... "Use NewClassName instead."
436
+
437
+ The docstring of the deprecated class will be used in the
438
+ deprecation warning message.
439
+ """
440
+
441
+ def __new__(cls, *args, **kwargs):
442
+ # Figure out which class is the deprecated one.
443
+ dep_cls = None
444
+ for base in _mro(cls):
445
+ if Deprecated in base.__bases__:
446
+ dep_cls = base
447
+ break
448
+ assert dep_cls, "Unable to determine which base is deprecated."
449
+
450
+ # Construct an appropriate warning.
451
+ doc = (dep_cls.__doc__ or "").strip()  # parenthesised so the docstring itself is stripped
452
+ # If there's a @deprecated field, strip off the field marker.
453
+ doc = re.sub(r"\A\s*@deprecated:", r"", doc)
454
+ # Strip off any indentation.
455
+ doc = re.sub(r"(?m)^\s*", "", doc)
456
+ # Construct a 'name' string.
457
+ name = "Class %s" % dep_cls.__name__
458
+ if cls != dep_cls:
459
+ name += " (base class for %s)" % cls.__name__
460
+ # Put it all together.
461
+ msg = f"{name} has been deprecated. {doc}"
462
+ # Wrap it.
463
+ msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ")
464
+ warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
465
+ # Do the actual work of __new__.
466
+ return object.__new__(cls)
467
+
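+ # Illustrative sketch: instantiating the deprecated subclass triggers a
+ # DeprecationWarning built from its docstring.
+ #
+ #     >>> import warnings
+ #     >>> class NewThing: pass
+ #     >>> class OldThing(Deprecated, NewThing):
+ #     ...     "Use NewThing instead."
+ #     >>> with warnings.catch_warnings(record=True) as w:
+ #     ...     warnings.simplefilter("always")
+ #     ...     obj = OldThing()
+ #     >>> 'Use NewThing instead.' in str(w[-1].message)
+ #     True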
468
+
469
+ ##########################################################################
470
+ # COUNTER, FOR UNIQUE NAMING
471
+ ##########################################################################
472
+
473
+
474
+ class Counter:
475
+ """
476
+ A counter that auto-increments each time its value is read.
477
+ """
478
+
479
+ def __init__(self, initial_value=0):
480
+ self._value = initial_value
481
+
482
+ def get(self):
483
+ self._value += 1
484
+ return self._value
485
+
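+ # Usage sketch: each read bumps the value, which makes the counter handy
+ # for generating unique, monotonically increasing names.
+ #
+ #     >>> c = Counter()
+ #     >>> (c.get(), c.get(), c.get())
+ #     (1, 2, 3)
+ #     >>> "node%d" % c.get()
+ #     'node4'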
486
+
487
+ ##########################################################################
488
+ # Search for files/binaries
489
+ ##########################################################################
490
+
491
+
492
+ def find_file_iter(
493
+ filename,
494
+ env_vars=(),
495
+ searchpath=(),
496
+ file_names=None,
497
+ url=None,
498
+ verbose=False,
499
+ finding_dir=False,
500
+ ):
501
+ """
502
+ Search for a file to be used by nltk.
503
+
504
+ :param filename: The name or path of the file.
505
+ :param env_vars: A list of environment variable names to check.
506
+ :param file_names: A list of alternative file names to check.
507
+ :param searchpath: List of directories to search.
508
+ :param url: URL presented to user for download help.
509
+ :param verbose: Whether or not to print path when a file is found.
510
+ """
511
+ file_names = [filename] + (file_names or [])
512
+ assert isinstance(filename, str)
513
+ assert not isinstance(file_names, str)
514
+ assert not isinstance(searchpath, str)
515
+ if isinstance(env_vars, str):
516
+ env_vars = env_vars.split()
517
+ yielded = False
518
+
519
+ # File exists, no magic
520
+ for alternative in file_names:
521
+ path_to_file = os.path.join(filename, alternative)
522
+ if os.path.isfile(path_to_file):
523
+ if verbose:
524
+ print(f"[Found {filename}: {path_to_file}]")
525
+ yielded = True
526
+ yield path_to_file
527
+ # Check the bare alternatives
528
+ if os.path.isfile(alternative):
529
+ if verbose:
530
+ print(f"[Found {filename}: {alternative}]")
531
+ yielded = True
532
+ yield alternative
533
+ # Check if the alternative is inside a 'file' directory
534
+ path_to_file = os.path.join(filename, "file", alternative)
535
+ if os.path.isfile(path_to_file):
536
+ if verbose:
537
+ print(f"[Found {filename}: {path_to_file}]")
538
+ yielded = True
539
+ yield path_to_file
540
+
541
+ # Check environment variables
542
+ for env_var in env_vars:
543
+ if env_var in os.environ:
544
+ if finding_dir: # Yield the directory itself instead of a file
545
+ yielded = True
546
+ yield os.environ[env_var]
547
+
548
+ for env_dir in os.environ[env_var].split(os.pathsep):
549
+ # Check if the environment variable contains a direct path to the bin
550
+ if os.path.isfile(env_dir):
551
+ if verbose:
552
+ print(f"[Found {filename}: {env_dir}]")
553
+ yielded = True
554
+ yield env_dir
555
+ # Check if the possible bin names exist inside the environment variable directories
556
+ for alternative in file_names:
557
+ path_to_file = os.path.join(env_dir, alternative)
558
+ if os.path.isfile(path_to_file):
559
+ if verbose:
560
+ print(f"[Found {filename}: {path_to_file}]")
561
+ yielded = True
562
+ yield path_to_file
563
+ # Check if the alternative is inside a 'file' directory
564
+ # path_to_file = os.path.join(env_dir, 'file', alternative)
565
+
566
+ # Check if the alternative is inside a 'bin' directory
567
+ path_to_file = os.path.join(env_dir, "bin", alternative)
568
+
569
+ if os.path.isfile(path_to_file):
570
+ if verbose:
571
+ print(f"[Found {filename}: {path_to_file}]")
572
+ yielded = True
573
+ yield path_to_file
574
+
575
+ # Check the path list.
576
+ for directory in searchpath:
577
+ for alternative in file_names:
578
+ path_to_file = os.path.join(directory, alternative)
579
+ if os.path.isfile(path_to_file):
580
+ yielded = True
581
+ yield path_to_file
582
+
583
+ # If we're on a POSIX system, then try using the 'which' command
584
+ # to find the file.
585
+ if os.name == "posix":
586
+ for alternative in file_names:
587
+ try:
588
+ p = subprocess.Popen(
589
+ ["which", alternative],
590
+ stdout=subprocess.PIPE,
591
+ stderr=subprocess.PIPE,
592
+ )
593
+ stdout, stderr = p.communicate()
594
+ path = _decode_stdoutdata(stdout).strip()
595
+ if path.endswith(alternative) and os.path.exists(path):
596
+ if verbose:
597
+ print(f"[Found {filename}: {path}]")
598
+ yielded = True
599
+ yield path
600
+ except (KeyboardInterrupt, SystemExit, OSError):
601
+ raise
602
+ finally:
603
+ pass
604
+
605
+ if not yielded:
606
+ msg = (
607
+ "NLTK was unable to find the %s file!"
608
+ "\nUse software specific "
609
+ "configuration parameters" % filename
610
+ )
611
+ if env_vars:
612
+ msg += " or set the %s environment variable" % env_vars[0]
613
+ msg += "."
614
+ if searchpath:
615
+ msg += "\n\n Searched in:"
616
+ msg += "".join("\n - %s" % d for d in searchpath)
617
+ if url:
618
+ msg += f"\n\n For more information on {filename}, see:\n <{url}>"
619
+ div = "=" * 75
620
+ raise LookupError(f"\n\n{div}\n{msg}\n{div}")
621
+
622
+
623
+ def find_file(
624
+ filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False
625
+ ):
626
+ return next(
627
+ find_file_iter(filename, env_vars, searchpath, file_names, url, verbose)
628
+ )
629
+
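+ # Hedged usage sketch. The binary name, environment variable, and search
+ # directories below are illustrative assumptions, not NLTK defaults:
+ #
+ #     path = find_file(
+ #         "gs",                           # file to locate
+ #         env_vars=["GHOSTSCRIPT"],       # environment variables to consult
+ #         searchpath=["/usr/local/bin"],  # explicit fallback directories
+ #         url="https://www.ghostscript.com/",
+ #     )
+ #
+ # On failure this raises LookupError, including the download hint from ``url``.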
630
+
631
+ def find_dir(
632
+ filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False
633
+ ):
634
+ return next(
635
+ find_file_iter(
636
+ filename, env_vars, searchpath, file_names, url, verbose, finding_dir=True
637
+ )
638
+ )
639
+
640
+
641
+ def find_binary_iter(
642
+ name,
643
+ path_to_bin=None,
644
+ env_vars=(),
645
+ searchpath=(),
646
+ binary_names=None,
647
+ url=None,
648
+ verbose=False,
649
+ ):
650
+ """
651
+ Search for a file to be used by nltk.
652
+
653
+ :param name: The name or path of the file.
654
+ :param path_to_bin: The user-supplied binary location (deprecated)
655
+ :param env_vars: A list of environment variable names to check.
656
+ :param binary_names: A list of alternative binary names to check.
657
+ :param searchpath: List of directories to search.
658
+ :param url: URL presented to user for download help.
659
+ :param verbose: Whether or not to print path when a file is found.
660
+ """
661
+ yield from find_file_iter(
662
+ path_to_bin or name, env_vars, searchpath, binary_names, url, verbose
663
+ )
664
+
665
+
666
+ def find_binary(
667
+ name,
668
+ path_to_bin=None,
669
+ env_vars=(),
670
+ searchpath=(),
671
+ binary_names=None,
672
+ url=None,
673
+ verbose=False,
674
+ ):
675
+ return next(
676
+ find_binary_iter(
677
+ name, path_to_bin, env_vars, searchpath, binary_names, url, verbose
678
+ )
679
+ )
680
+
681
+
682
+ def find_jar_iter(
683
+ name_pattern,
684
+ path_to_jar=None,
685
+ env_vars=(),
686
+ searchpath=(),
687
+ url=None,
688
+ verbose=False,
689
+ is_regex=False,
690
+ ):
691
+ """
692
+ Search for a jar that is used by nltk.
693
+
694
+ :param name_pattern: The name of the jar file
695
+ :param path_to_jar: The user-supplied jar location, or None.
696
+ :param env_vars: A list of environment variable names to check
697
+ in addition to the CLASSPATH variable which is
698
+ checked by default.
699
+ :param searchpath: List of directories to search.
700
+ :param is_regex: Whether name_pattern is a regular expression.
701
+ """
702
+
703
+ assert isinstance(name_pattern, str)
704
+ assert not isinstance(searchpath, str)
705
+ if isinstance(env_vars, str):
706
+ env_vars = env_vars.split()
707
+ yielded = False
708
+
709
+ # Make sure we check the CLASSPATH first
710
+ env_vars = ["CLASSPATH"] + list(env_vars)
711
+
712
+ # If an explicit location was given, then check it, and yield it if
713
+ # it's present; otherwise, complain.
714
+ if path_to_jar is not None:
715
+ if os.path.isfile(path_to_jar):
716
+ yielded = True
717
+ yield path_to_jar
718
+ else:
719
+ raise LookupError(
720
+ f"Could not find {name_pattern} jar file at {path_to_jar}"
721
+ )
722
+
723
+ # Check environment variables
724
+ for env_var in env_vars:
725
+ if env_var in os.environ:
726
+ if env_var == "CLASSPATH":
727
+ classpath = os.environ["CLASSPATH"]
728
+ for cp in classpath.split(os.path.pathsep):
729
+ cp = os.path.expanduser(cp)
730
+ if os.path.isfile(cp):
731
+ filename = os.path.basename(cp)
732
+ if (
733
+ is_regex
734
+ and re.match(name_pattern, filename)
735
+ or (not is_regex and filename == name_pattern)
736
+ ):
737
+ if verbose:
738
+ print(f"[Found {name_pattern}: {cp}]")
739
+ yielded = True
740
+ yield cp
741
+ # The case where the user put a directory containing the jar file on the classpath
742
+ if os.path.isdir(cp):
743
+ if not is_regex:
744
+ if os.path.isfile(os.path.join(cp, name_pattern)):
745
+ if verbose:
746
+ print(f"[Found {name_pattern}: {cp}]")
747
+ yielded = True
748
+ yield os.path.join(cp, name_pattern)
749
+ else:
750
+ # Look for file using regular expression
751
+ for file_name in os.listdir(cp):
752
+ if re.match(name_pattern, file_name):
753
+ if verbose:
754
+ print(
755
+ "[Found %s: %s]"
756
+ % (
757
+ name_pattern,
758
+ os.path.join(cp, file_name),
759
+ )
760
+ )
761
+ yielded = True
762
+ yield os.path.join(cp, file_name)
763
+
764
+ else:
765
+ jar_env = os.path.expanduser(os.environ[env_var])
766
+ jar_iter = (
767
+ (
768
+ os.path.join(jar_env, path_to_jar)
769
+ for path_to_jar in os.listdir(jar_env)
770
+ )
771
+ if os.path.isdir(jar_env)
772
+ else (jar_env,)
773
+ )
774
+ for path_to_jar in jar_iter:
775
+ if os.path.isfile(path_to_jar):
776
+ filename = os.path.basename(path_to_jar)
777
+ if (
778
+ is_regex
779
+ and re.match(name_pattern, filename)
780
+ or (not is_regex and filename == name_pattern)
781
+ ):
782
+ if verbose:
783
+ print(f"[Found {name_pattern}: {path_to_jar}]")
784
+ yielded = True
785
+ yield path_to_jar
786
+
787
+ # Check the path list.
788
+ for directory in searchpath:
789
+ if is_regex:
790
+ for filename in os.listdir(directory):
791
+ path_to_jar = os.path.join(directory, filename)
792
+ if os.path.isfile(path_to_jar):
793
+ if re.match(name_pattern, filename):
794
+ if verbose:
795
+ print(f"[Found {filename}: {path_to_jar}]")
796
+ yielded = True
797
+ yield path_to_jar
798
+ else:
799
+ path_to_jar = os.path.join(directory, name_pattern)
800
+ if os.path.isfile(path_to_jar):
801
+ if verbose:
802
+ print(f"[Found {name_pattern}: {path_to_jar}]")
803
+ yielded = True
804
+ yield path_to_jar
805
+
806
+ if not yielded:
807
+ # If nothing was found, raise an error
808
+ msg = "NLTK was unable to find %s!" % name_pattern
809
+ if env_vars:
810
+ msg += " Set the %s environment variable" % env_vars[0]
811
+ msg = textwrap.fill(msg + ".", initial_indent=" ", subsequent_indent=" ")
812
+ if searchpath:
813
+ msg += "\n\n Searched in:"
814
+ msg += "".join("\n - %s" % d for d in searchpath)
815
+ if url:
816
+ msg += "\n\n For more information, on {}, see:\n <{}>".format(
817
+ name_pattern,
818
+ url,
819
+ )
820
+ div = "=" * 75
821
+ raise LookupError(f"\n\n{div}\n{msg}\n{div}")
822
+
823
+
824
+ def find_jar(
825
+ name_pattern,
826
+ path_to_jar=None,
827
+ env_vars=(),
828
+ searchpath=(),
829
+ url=None,
830
+ verbose=False,
831
+ is_regex=False,
832
+ ):
833
+ return next(
834
+ find_jar_iter(
835
+ name_pattern, path_to_jar, env_vars, searchpath, url, verbose, is_regex
836
+ )
837
+ )
838
+
839
+
840
+ def find_jars_within_path(path_to_jars):
841
+ return [
842
+ os.path.join(root, filename)
843
+ for root, dirnames, filenames in os.walk(path_to_jars)
844
+ for filename in fnmatch.filter(filenames, "*.jar")
845
+ ]
846
+
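+ # Sketch: building a Java classpath from every jar under a (hypothetical)
+ # install directory.
+ #
+ #     jars = find_jars_within_path("/opt/stanford-corenlp")  # assumed path
+ #     classpath = os.pathsep.join(jars)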
847
+
848
+ def _decode_stdoutdata(stdoutdata):
849
+ """Convert data read from stdout/stderr to unicode"""
850
+ if not isinstance(stdoutdata, bytes):
851
+ return stdoutdata
852
+
853
+ encoding = getattr(sys.__stdout__, "encoding", locale.getpreferredencoding())
854
+ if encoding is None:
855
+ return stdoutdata.decode()
856
+ return stdoutdata.decode(encoding)
857
+
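+ # Illustration: bytes are decoded with the console's encoding when known,
+ # and text passes through untouched.
+ #
+ #     >>> _decode_stdoutdata(b'hello')
+ #     'hello'
+ #     >>> _decode_stdoutdata('already text')
+ #     'already text'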
858
+
859
+ ##########################################################################
860
+ # Import Stdlib Module
861
+ ##########################################################################
862
+
863
+
864
+ def import_from_stdlib(module):
865
+ """
866
+ When python is run from within the nltk/ directory tree, the
867
+ current directory is included at the beginning of the search path.
868
+ Unfortunately, that means that modules within nltk can sometimes
869
+ shadow standard library modules. As an example, the stdlib
870
+ 'inspect' module will attempt to import the stdlib 'tokenize'
871
+ module, but will end up importing NLTK's 'tokenize' module
872
+ instead (causing the import to fail).
873
+ """
874
+ old_path = sys.path
875
+ sys.path = [d for d in sys.path if d not in ("", ".")]
876
+ m = __import__(module)
877
+ sys.path = old_path
878
+ return m
879
+
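+ # Usage sketch: grab the standard-library module even if an nltk module of
+ # the same name shadows it on sys.path.
+ #
+ #     >>> tokenize = import_from_stdlib('tokenize')
+ #     >>> hasattr(tokenize, 'generate_tokens')  # stdlib API
+ #     True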
880
+
881
+ ##########################################################################
882
+ # Wrapper for ElementTree Elements
883
+ ##########################################################################
884
+
885
+
886
+ class ElementWrapper:
887
+ """
888
+ A wrapper around ElementTree Element objects whose main purpose is
889
+ to provide nicer __repr__ and __str__ methods. In addition, any
890
+ of the wrapped Element's methods that return other Element objects
891
+ are overridden to wrap those values before returning them.
892
+
893
+ This makes Elements more convenient to work with in
894
+ interactive sessions and doctests, at the expense of some
895
+ efficiency.
896
+ """
897
+
898
+ # Prevent double-wrapping:
899
+ def __new__(cls, etree):
900
+ """
901
+ Create and return a wrapper around a given Element object.
902
+ If ``etree`` is an ``ElementWrapper``, then ``etree`` is
903
+ returned as-is.
904
+ """
905
+ if isinstance(etree, ElementWrapper):
906
+ return etree
907
+ else:
908
+ return object.__new__(ElementWrapper)
909
+
910
+ def __init__(self, etree):
911
+ r"""
912
+ Initialize a new Element wrapper for ``etree``.
913
+
914
+ If ``etree`` is a string, then it will be converted to an
915
+ Element object using ``ElementTree.fromstring()`` first:
916
+
917
+ >>> ElementWrapper("<test></test>")
918
+ <Element "<?xml version='1.0' encoding='utf8'?>\n<test />">
919
+
920
+ """
921
+ if isinstance(etree, str):
922
+ etree = ElementTree.fromstring(etree)
923
+ self.__dict__["_etree"] = etree
924
+
925
+ def unwrap(self):
926
+ """
927
+ Return the Element object wrapped by this wrapper.
928
+ """
929
+ return self._etree
930
+
931
+ ##////////////////////////////////////////////////////////////
932
+ # { String Representation
933
+ ##////////////////////////////////////////////////////////////
934
+
935
+ def __repr__(self):
936
+ s = ElementTree.tostring(self._etree, encoding="utf8").decode("utf8")
937
+ if len(s) > 60:
938
+ e = s.rfind("<")
939
+ if (len(s) - e) > 30:
940
+ e = -20
941
+ s = f"{s[:30]}...{s[e:]}"
942
+ return "<Element %r>" % s
943
+
944
+ def __str__(self):
945
+ """
946
+ :return: the result of applying ``ElementTree.tostring()`` to
947
+ the wrapped Element object.
948
+ """
949
+ return (
950
+ ElementTree.tostring(self._etree, encoding="utf8").decode("utf8").rstrip()
951
+ )
952
+
953
+ ##////////////////////////////////////////////////////////////
954
+ # { Element interface Delegation (pass-through)
955
+ ##////////////////////////////////////////////////////////////
956
+
957
+ def __getattr__(self, attrib):
958
+ return getattr(self._etree, attrib)
959
+
960
+ def __setattr__(self, attr, value):
961
+ return setattr(self._etree, attr, value)
962
+
963
+ def __delattr__(self, attr):
964
+ return delattr(self._etree, attr)
965
+
966
+ def __setitem__(self, index, element):
967
+ self._etree[index] = element
968
+
969
+ def __delitem__(self, index):
970
+ del self._etree[index]
971
+
972
+ def __setslice__(self, start, stop, elements):
973
+ self._etree[start:stop] = elements
974
+
975
+ def __delslice__(self, start, stop):
976
+ del self._etree[start:stop]
977
+
978
+ def __len__(self):
979
+ return len(self._etree)
980
+
981
+ ##////////////////////////////////////////////////////////////
982
+ # { Element interface Delegation (wrap result)
983
+ ##////////////////////////////////////////////////////////////
984
+
985
+ def __getitem__(self, index):
986
+ return ElementWrapper(self._etree[index])
987
+
988
+ def __getslice__(self, start, stop):
989
+ return [ElementWrapper(elt) for elt in self._etree[start:stop]]
990
+
991
+ def getchildren(self):
992
+ return [ElementWrapper(elt) for elt in self._etree]
993
+
994
+ def getiterator(self, tag=None):
995
+ # Element.getiterator() was removed in Python 3.9; delegate to iter().
+ return (ElementWrapper(elt) for elt in self._etree.iter(tag))
996
+
997
+ def makeelement(self, tag, attrib):
998
+ return ElementWrapper(self._etree.makeelement(tag, attrib))
999
+
1000
+ def find(self, path):
1001
+ elt = self._etree.find(path)
1002
+ if elt is None:
1003
+ return elt
1004
+ else:
1005
+ return ElementWrapper(elt)
1006
+
1007
+ def findall(self, path):
1008
+ return [ElementWrapper(elt) for elt in self._etree.findall(path)]
1009
+
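+ # Usage sketch: wrapped elements keep the Element API but print readably,
+ # and child lookups come back wrapped as well.
+ #
+ #     >>> root = ElementWrapper("<doc><s>Hello</s></doc>")
+ #     >>> print(root[0].text)
+ #     Hello
+ #     >>> isinstance(root.find('s'), ElementWrapper)
+ #     True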
1010
+
1011
+ ######################################################################
1012
+ # Helper for Handling Slicing
1013
+ ######################################################################
1014
+
1015
+
1016
+ def slice_bounds(sequence, slice_obj, allow_step=False):
1017
+ """
1018
+ Given a slice, return the corresponding (start, stop) bounds,
1019
+ taking into account None indices and negative indices. The
1020
+ following guarantees are made for the returned start and stop values:
1021
+
1022
+ - 0 <= start <= len(sequence)
1023
+ - 0 <= stop <= len(sequence)
1024
+ - start <= stop
1025
+
1026
+ :raise ValueError: If ``slice_obj.step`` is not None.
1027
+ :param allow_step: If true, then the slice object may have a
1028
+ non-None step. If it does, then return a tuple
1029
+ (start, stop, step).
1030
+ """
1031
+ start, stop = (slice_obj.start, slice_obj.stop)
1032
+
1033
+ # If allow_step is true, then include the step in our return
1034
+ # value tuple.
1035
+ if allow_step:
1036
+ step = slice_obj.step
1037
+ if step is None:
1038
+ step = 1
1039
+ # Use a recursive call without allow_step to find the slice
1040
+ # bounds. If step is negative, then the roles of start and
1041
+ # stop (in terms of default values, etc), are swapped.
1042
+ if step < 0:
1043
+ start, stop = slice_bounds(sequence, slice(stop, start))
1044
+ else:
1045
+ start, stop = slice_bounds(sequence, slice(start, stop))
1046
+ return start, stop, step
1047
+
1048
+ # Otherwise, make sure that no non-default step value is used.
1049
+ elif slice_obj.step not in (None, 1):
1050
+ raise ValueError(
1051
+ "slices with steps are not supported by %s" % sequence.__class__.__name__
1052
+ )
1053
+
1054
+ # Supply default offsets.
1055
+ if start is None:
1056
+ start = 0
1057
+ if stop is None:
1058
+ stop = len(sequence)
1059
+
1060
+ # Handle negative indices.
1061
+ if start < 0:
1062
+ start = max(0, len(sequence) + start)
1063
+ if stop < 0:
1064
+ stop = max(0, len(sequence) + stop)
1065
+
1066
+ # Make sure stop doesn't go past the end of the list. Note that
1067
+ # we avoid calculating len(sequence) if possible, because for lazy
1068
+ # sequences, calculating the length of a sequence can be expensive.
1069
+ if stop > 0:
1070
+ try:
1071
+ sequence[stop - 1]
1072
+ except IndexError:
1073
+ stop = len(sequence)
1074
+
1075
+ # Make sure start isn't past stop.
1076
+ start = min(start, stop)
1077
+
1078
+ # That's all folks!
1079
+ return start, stop
1080
+
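+ # Usage sketch: negative and open-ended indices are normalised against the
+ # sequence length.
+ #
+ #     >>> seq = list(range(10))
+ #     >>> slice_bounds(seq, slice(-3, None))
+ #     (7, 10)
+ #     >>> slice_bounds(seq, slice(None, None, -1), allow_step=True)
+ #     (0, 10, -1)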
1081
+
1082
+ ######################################################################
1083
+ # Permission Checking
1084
+ ######################################################################
1085
+
1086
+
1087
+ def is_writable(path):
1088
+ # Ensure that it exists.
1089
+ if not os.path.exists(path):
1090
+ return False
1091
+
1092
+ # If we're on a posix system, check its permissions.
1093
+ if hasattr(os, "getuid"):
1094
+ statdata = os.stat(path)
1095
+ perm = stat.S_IMODE(statdata.st_mode)
1096
+ # is it world-writable?
1097
+ if perm & 0o002:
1098
+ return True
1099
+ # do we own it?
1100
+ elif statdata.st_uid == os.getuid() and (perm & 0o200):
1101
+ return True
1102
+ # are we in a group that can write to it?
1103
+ elif (statdata.st_gid in [os.getgid()] + os.getgroups()) and (perm & 0o020):
1104
+ return True
1105
+ # otherwise, we can't write to it.
1106
+ else:
1107
+ return False
1108
+
1109
+ # Otherwise, we'll assume it's writable.
1110
+ # [xx] should we do other checks on other platforms?
1111
+ return True
1112
+
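+ # Sketch: on POSIX this inspects the mode bits against the current uid/gid;
+ # elsewhere any existing path is assumed writable.
+ #
+ #     >>> import tempfile
+ #     >>> is_writable(tempfile.gettempdir())
+ #     True
+ #     >>> is_writable('/no/such/path')
+ #     False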
1113
+
1114
+ ######################################################################
1115
+ # NLTK Error reporting
1116
+ ######################################################################
1117
+
1118
+
1119
+ def raise_unorderable_types(ordering, a, b):
1120
+ raise TypeError(
1121
+ "unorderable types: %s() %s %s()"
1122
+ % (type(a).__name__, ordering, type(b).__name__)
1123
+ )
env-llmeval/lib/python3.10/site-packages/nltk/jsontags.py ADDED
@@ -0,0 +1,65 @@
1
+ # Natural Language Toolkit: JSON Encoder/Decoder Helpers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Xu <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Register JSON tags, so the nltk data loader knows what module and class to look for.
11
+
12
+ NLTK uses simple '!' tags to mark the types of objects, but the fully-qualified
13
+ "tag:nltk.org,2011:" prefix is also accepted in case anyone ends up
14
+ using it.
15
+ """
16
+
17
+ import json
18
+
19
+ json_tags = {}
20
+
21
+ TAG_PREFIX = "!"
22
+
23
+
24
+ def register_tag(cls):
25
+ """
26
+ Decorates a class to register its JSON tag.
27
+ """
28
+ json_tags[TAG_PREFIX + getattr(cls, "json_tag")] = cls
29
+ return cls
30
+
31
+
32
+ class JSONTaggedEncoder(json.JSONEncoder):
33
+ def default(self, obj):
34
+ obj_tag = getattr(obj, "json_tag", None)
35
+ if obj_tag is None:
36
+ return super().default(obj)
37
+ obj_tag = TAG_PREFIX + obj_tag
38
+ obj = obj.encode_json_obj()
39
+ return {obj_tag: obj}
40
+
41
+
42
+ class JSONTaggedDecoder(json.JSONDecoder):
43
+ def decode(self, s):
44
+ return self.decode_obj(super().decode(s))
45
+
46
+ @classmethod
47
+ def decode_obj(cls, obj):
48
+ # Decode nested objects first.
49
+ if isinstance(obj, dict):
50
+ obj = {key: cls.decode_obj(val) for (key, val) in obj.items()}
51
+ elif isinstance(obj, list):
52
+ obj = list(cls.decode_obj(val) for val in obj)
53
+ # Check if we have a tagged object.
54
+ if not isinstance(obj, dict) or len(obj) != 1:
55
+ return obj
56
+ obj_tag = next(iter(obj.keys()))
57
+ if not obj_tag.startswith("!"):
58
+ return obj
59
+ if obj_tag not in json_tags:
60
+ raise ValueError("Unknown tag", obj_tag)
61
+ obj_cls = json_tags[obj_tag]
62
+ return obj_cls.decode_json_obj(obj[obj_tag])
63
+
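+ # Usage sketch (the ``Point`` class is illustrative): a class opts in by
+ # defining ``json_tag`` plus encode/decode hooks and registering itself.
+ #
+ #     >>> @register_tag
+ #     ... class Point:
+ #     ...     json_tag = 'Point'
+ #     ...     def __init__(self, x, y):
+ #     ...         self.x, self.y = x, y
+ #     ...     def encode_json_obj(self):
+ #     ...         return [self.x, self.y]
+ #     ...     @classmethod
+ #     ...     def decode_json_obj(cls, obj):
+ #     ...         return cls(*obj)
+ #     >>> s = json.dumps(Point(1, 2), cls=JSONTaggedEncoder)
+ #     >>> s
+ #     '{"!Point": [1, 2]}'
+ #     >>> p = json.loads(s, cls=JSONTaggedDecoder)
+ #     >>> (p.x, p.y)
+ #     (1, 2)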
64
+
65
+ __all__ = ["register_tag", "json_tags", "JSONTaggedEncoder", "JSONTaggedDecoder"]
env-llmeval/lib/python3.10/site-packages/nltk/langnames.py ADDED
@@ -0,0 +1,730 @@
1
+ # Natural Language Toolkit: Language Codes
2
+ #
3
+ # Copyright (C) 2022-2023 NLTK Project
4
+ # Author: Eric Kafe <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ #
8
+ # iso639-3 language codes (C) https://iso639-3.sil.org/
9
+
10
+ """
11
+ Translate between language names and language codes.
12
+
13
+ The iso639-3 language codes were downloaded from the registration authority at
14
+ https://iso639-3.sil.org/
15
+
16
+ The iso639-3 codeset is evolving, so retired language codes are kept in the
17
+ "iso639retired" dictionary, which is used as fallback by the wrapper functions
18
+ "langname" and "langcode", in order to support the lookup of retired codes.
19
+
20
+ The "langcode" function returns the current iso639-3 code if there is one,
21
+ and falls back to the retired code otherwise. As specified by BCP-47,
22
+ it returns the shortest (2-letter) code by default, but 3-letter codes
23
+ are also available:
24
+
25
+ >>> import nltk.langnames as lgn
26
+ >>> lgn.langname('fri') #'fri' is a retired code
27
+ 'Western Frisian'
28
+
29
+ The current code is different from the retired one:
30
+ >>> lgn.langcode('Western Frisian')
31
+ 'fy'
32
+
33
+ >>> lgn.langcode('Western Frisian', typ = 3)
34
+ 'fry'
35
+
36
+ """
37
+
38
+ import re
39
+ from warnings import warn
40
+
41
+ from nltk.corpus import bcp47
42
+
43
+ codepattern = re.compile("[a-z][a-z][a-z]?")
44
+
45
+
46
+ def langname(tag, typ="full"):
47
+ """
48
+ Convert a composite BCP-47 tag to a language name
49
+
50
+ >>> from nltk.langnames import langname
51
+ >>> langname('ca-Latn-ES-valencia')
52
+ 'Catalan: Latin: Spain: Valencian'
53
+
54
+ >>> langname('ca-Latn-ES-valencia', typ="short")
55
+ 'Catalan'
56
+ """
57
+ tags = tag.split("-")
58
+ code = tags[0].lower()
59
+ if codepattern.fullmatch(code):
60
+ if code in iso639retired: # retired codes
61
+ return iso639retired[code]
62
+ elif code in iso639short: # 3-letter codes
63
+ code2 = iso639short[code] # convert to 2-letter code
64
+ warn(f"Shortening {code!r} to {code2!r}", stacklevel=2)
65
+ tag = "-".join([code2] + tags[1:])
66
+ name = bcp47.name(tag) # parse according to BCP-47
67
+ if typ == "full":
68
+ return name # include all subtags
69
+ elif name:
70
+ return name.split(":")[0] # only the language subtag
71
+ else:
72
+ warn(f"Could not find code in {code!r}", stacklevel=2)
73
+
74
+
75
+ def langcode(name, typ=2):
76
+ """
77
+ Convert language name to iso639-3 language code. Returns the short 2-letter
78
+ code by default, if one is available, and the 3-letter code otherwise:
79
+
80
+ >>> from nltk.langnames import langcode
81
+ >>> langcode('Modern Greek (1453-)')
82
+ 'el'
83
+
84
+ Specify 'typ=3' to get the 3-letter code:
85
+
86
+ >>> langcode('Modern Greek (1453-)', typ=3)
87
+ 'ell'
88
+ """
89
+ if name in bcp47.langcode:
90
+ code = bcp47.langcode[name]
91
+ if typ == 3 and code in iso639long:
92
+ code = iso639long[code] # convert to 3-letter code
93
+ return code
94
+ elif name in iso639code_retired:
95
+ return iso639code_retired[name]
96
+ else:
97
+ warn(f"Could not find language in {name!r}", stacklevel=2)
98
+
99
+
100
+ # =======================================================================
101
+ # Translate between Wikidata Q-codes and BCP-47 codes or names
102
+ # .......................................................................
103
+
104
+
105
+ def tag2q(tag):
106
+ """
107
+ Convert BCP-47 tag to Wikidata Q-code
108
+
109
+ >>> tag2q('nds-u-sd-demv')
110
+ 'Q4289225'
111
+ """
112
+ return bcp47.wiki_q[tag]
113
+
114
+
115
+ def q2tag(qcode):
116
+ """
117
+ Convert Wikidata Q-code to BCP-47 tag
118
+
119
+ >>> q2tag('Q4289225')
120
+ 'nds-u-sd-demv'
121
+ """
122
+ return wiki_bcp47[qcode]
123
+
124
+
125
+ def q2name(qcode, typ="full"):
126
+ """
127
+ Convert Wikidata Q-code to BCP-47 (full or short) language name
128
+
129
+ >>> q2name('Q4289225')
130
+ 'Low German: Mecklenburg-Vorpommern'
131
+
132
+ >>> q2name('Q4289225', "short")
133
+ 'Low German'
134
+ """
135
+ return langname(q2tag(qcode), typ)
136
+
137
+
138
+ def lang2q(name):
139
+ """
140
+ Convert simple language name to Wikidata Q-code
141
+
142
+ >>> lang2q('Low German')
143
+ 'Q25433'
144
+ """
145
+ return tag2q(langcode(name))
146
+
147
+
148
+ # ======================================================================
149
+ # Data dictionaries
150
+ # ......................................................................
151
+
152
+
153
+ def inverse_dict(dic):
154
+ """Return inverse mapping, but only if it is bijective"""
155
+ if len(dic.keys()) == len(set(dic.values())):
156
+ return {val: key for (key, val) in dic.items()}
157
+ else:
158
+ warn("This dictionary has no bijective inverse mapping.")
159
+
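+ # Sketch: inversion only succeeds when the values are unique, which holds
+ # for the iso639 tables below.
+ #
+ #     >>> inverse_dict({"eng": "en", "fra": "fr"})
+ #     {'en': 'eng', 'fr': 'fra'}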
160
+
161
+ bcp47.load_wiki_q() # Wikidata conversion table needs to be loaded explicitly
162
+ wiki_bcp47 = inverse_dict(bcp47.wiki_q)
163
+
164
+ iso639short = {
165
+ "aar": "aa",
166
+ "abk": "ab",
167
+ "afr": "af",
168
+ "aka": "ak",
169
+ "amh": "am",
170
+ "ara": "ar",
171
+ "arg": "an",
172
+ "asm": "as",
173
+ "ava": "av",
174
+ "ave": "ae",
175
+ "aym": "ay",
176
+ "aze": "az",
177
+ "bak": "ba",
178
+ "bam": "bm",
179
+ "bel": "be",
180
+ "ben": "bn",
181
+ "bis": "bi",
182
+ "bod": "bo",
183
+ "bos": "bs",
184
+ "bre": "br",
185
+ "bul": "bg",
186
+ "cat": "ca",
187
+ "ces": "cs",
188
+ "cha": "ch",
189
+ "che": "ce",
190
+ "chu": "cu",
191
+ "chv": "cv",
192
+ "cor": "kw",
193
+ "cos": "co",
194
+ "cre": "cr",
195
+ "cym": "cy",
196
+ "dan": "da",
197
+ "deu": "de",
198
+ "div": "dv",
199
+ "dzo": "dz",
200
+ "ell": "el",
201
+ "eng": "en",
202
+ "epo": "eo",
203
+ "est": "et",
204
+ "eus": "eu",
205
+ "ewe": "ee",
206
+ "fao": "fo",
207
+ "fas": "fa",
208
+ "fij": "fj",
209
+ "fin": "fi",
210
+ "fra": "fr",
211
+ "fry": "fy",
212
+ "ful": "ff",
213
+ "gla": "gd",
214
+ "gle": "ga",
215
+ "glg": "gl",
216
+ "glv": "gv",
217
+ "grn": "gn",
218
+ "guj": "gu",
219
+ "hat": "ht",
220
+ "hau": "ha",
221
+ "hbs": "sh",
222
+ "heb": "he",
223
+ "her": "hz",
224
+ "hin": "hi",
225
+ "hmo": "ho",
226
+ "hrv": "hr",
227
+ "hun": "hu",
228
+ "hye": "hy",
229
+ "ibo": "ig",
230
+ "ido": "io",
231
+ "iii": "ii",
232
+ "iku": "iu",
233
+ "ile": "ie",
234
+ "ina": "ia",
235
+ "ind": "id",
236
+ "ipk": "ik",
237
+ "isl": "is",
238
+ "ita": "it",
239
+ "jav": "jv",
240
+ "jpn": "ja",
241
+ "kal": "kl",
242
+ "kan": "kn",
243
+ "kas": "ks",
244
+ "kat": "ka",
245
+ "kau": "kr",
246
+ "kaz": "kk",
247
+ "khm": "km",
248
+ "kik": "ki",
249
+ "kin": "rw",
250
+ "kir": "ky",
251
+ "kom": "kv",
252
+ "kon": "kg",
253
+ "kor": "ko",
254
+ "kua": "kj",
255
+ "kur": "ku",
256
+ "lao": "lo",
257
+ "lat": "la",
258
+ "lav": "lv",
259
+ "lim": "li",
260
+ "lin": "ln",
261
+ "lit": "lt",
262
+ "ltz": "lb",
263
+ "lub": "lu",
264
+ "lug": "lg",
265
+ "mah": "mh",
266
+ "mal": "ml",
267
+ "mar": "mr",
268
+ "mkd": "mk",
269
+ "mlg": "mg",
270
+ "mlt": "mt",
271
+ "mon": "mn",
272
+ "mri": "mi",
273
+ "msa": "ms",
274
+ "mya": "my",
275
+ "nau": "na",
276
+ "nav": "nv",
277
+ "nbl": "nr",
278
+ "nde": "nd",
279
+ "ndo": "ng",
280
+ "nep": "ne",
281
+ "nld": "nl",
282
+ "nno": "nn",
283
+ "nob": "nb",
284
+ "nor": "no",
285
+ "nya": "ny",
286
+ "oci": "oc",
287
+ "oji": "oj",
288
+ "ori": "or",
289
+ "orm": "om",
290
+ "oss": "os",
291
+ "pan": "pa",
292
+ "pli": "pi",
293
+ "pol": "pl",
294
+ "por": "pt",
295
+ "pus": "ps",
296
+ "que": "qu",
297
+ "roh": "rm",
298
+ "ron": "ro",
299
+ "run": "rn",
300
+ "rus": "ru",
301
+ "sag": "sg",
302
+ "san": "sa",
303
+ "sin": "si",
304
+ "slk": "sk",
305
+ "slv": "sl",
306
+ "sme": "se",
307
+ "smo": "sm",
308
+ "sna": "sn",
309
+ "snd": "sd",
310
+ "som": "so",
311
+ "sot": "st",
312
+ "spa": "es",
313
+ "sqi": "sq",
314
+ "srd": "sc",
315
+ "srp": "sr",
316
+ "ssw": "ss",
317
+ "sun": "su",
318
+ "swa": "sw",
319
+ "swe": "sv",
320
+ "tah": "ty",
321
+ "tam": "ta",
322
+ "tat": "tt",
323
+ "tel": "te",
324
+ "tgk": "tg",
325
+ "tgl": "tl",
326
+ "tha": "th",
327
+ "tir": "ti",
328
+ "ton": "to",
329
+ "tsn": "tn",
330
+ "tso": "ts",
331
+ "tuk": "tk",
332
+ "tur": "tr",
333
+ "twi": "tw",
334
+ "uig": "ug",
335
+ "ukr": "uk",
336
+ "urd": "ur",
337
+ "uzb": "uz",
338
+ "ven": "ve",
339
+ "vie": "vi",
340
+ "vol": "vo",
341
+ "wln": "wa",
342
+ "wol": "wo",
343
+ "xho": "xh",
344
+ "yid": "yi",
345
+ "yor": "yo",
346
+ "zha": "za",
347
+ "zho": "zh",
348
+ "zul": "zu",
349
+ }
350
+
351
+
352
+ iso639retired = {
353
+ "fri": "Western Frisian",
354
+ "auv": "Auvergnat",
355
+ "gsc": "Gascon",
356
+ "lms": "Limousin",
357
+ "lnc": "Languedocien",
358
+ "prv": "Provençal",
359
+ "amd": "Amapá Creole",
360
+ "bgh": "Bogan",
361
+ "bnh": "Banawá",
362
+ "bvs": "Belgian Sign Language",
363
+ "ccy": "Southern Zhuang",
364
+ "cit": "Chittagonian",
365
+ "flm": "Falam Chin",
366
+ "jap": "Jaruára",
367
+ "kob": "Kohoroxitari",
368
+ "mob": "Moinba",
369
+ "mzf": "Aiku",
370
+ "nhj": "Tlalitzlipa Nahuatl",
371
+ "nhs": "Southeastern Puebla Nahuatl",
372
+ "occ": "Occidental",
373
+ "tmx": "Tomyang",
374
+ "tot": "Patla-Chicontla Totonac",
375
+ "xmi": "Miarrã",
376
+ "yib": "Yinglish",
377
+ "ztc": "Lachirioag Zapotec",
378
+ "atf": "Atuence",
379
+ "bqe": "Navarro-Labourdin Basque",
380
+ "bsz": "Souletin Basque",
381
+ "aex": "Amerax",
382
+ "ahe": "Ahe",
383
+ "aiz": "Aari",
384
+ "akn": "Amikoana",
385
+ "arf": "Arafundi",
386
+ "azr": "Adzera",
387
+ "bcx": "Pamona",
388
+ "bii": "Bisu",
389
+ "bke": "Bengkulu",
390
+ "blu": "Hmong Njua",
391
+ "boc": "Bakung Kenyah",
392
+ "bsd": "Sarawak Bisaya",
393
+ "bwv": "Bahau River Kenyah",
394
+ "bxt": "Buxinhua",
395
+ "byu": "Buyang",
396
+ "ccx": "Northern Zhuang",
397
+ "cru": "Carútana",
398
+ "dat": "Darang Deng",
399
+ "dyk": "Land Dayak",
400
+ "eni": "Enim",
401
+ "fiz": "Izere",
402
+ "gen": "Geman Deng",
403
+ "ggh": "Garreh-Ajuran",
404
+ "itu": "Itutang",
405
+ "kds": "Lahu Shi",
406
+ "knh": "Kayan River Kenyah",
407
+ "krg": "North Korowai",
408
+ "krq": "Krui",
409
+ "kxg": "Katingan",
410
+ "lmt": "Lematang",
411
+ "lnt": "Lintang",
412
+ "lod": "Berawan",
413
+ "mbg": "Northern Nambikuára",
414
+ "mdo": "Southwest Gbaya",
415
+ "mhv": "Arakanese",
416
+ "miv": "Mimi",
417
+ "mqd": "Madang",
418
+ "nky": "Khiamniungan Naga",
419
+ "nxj": "Nyadu",
420
+ "ogn": "Ogan",
421
+ "ork": "Orokaiva",
422
+ "paj": "Ipeka-Tapuia",
423
+ "pec": "Southern Pesisir",
424
+ "pen": "Penesak",
425
+ "plm": "Palembang",
426
+ "poj": "Lower Pokomo",
427
+ "pun": "Pubian",
428
+ "rae": "Ranau",
429
+ "rjb": "Rajbanshi",
430
+ "rws": "Rawas",
431
+ "sdd": "Semendo",
432
+ "sdi": "Sindang Kelingi",
433
+ "skl": "Selako",
434
+ "slb": "Kahumamahon Saluan",
435
+ "srj": "Serawai",
436
+ "suf": "Tarpia",
437
+ "suh": "Suba",
438
+ "suu": "Sungkai",
439
+ "szk": "Sizaki",
440
+ "tle": "Southern Marakwet",
441
+ "tnj": "Tanjong",
442
+ "ttx": "Tutong 1",
443
+ "ubm": "Upper Baram Kenyah",
444
+ "vky": "Kayu Agung",
445
+ "vmo": "Muko-Muko",
446
+ "wre": "Ware",
447
+ "xah": "Kahayan",
448
+ "xkm": "Mahakam Kenyah",
449
+ "xuf": "Kunfal",
450
+ "yio": "Dayao Yi",
451
+ "ymj": "Muji Yi",
452
+ "ypl": "Pula Yi",
453
+ "ypw": "Puwa Yi",
454
+ "ywm": "Wumeng Yi",
455
+ "yym": "Yuanjiang-Mojiang Yi",
456
+ "mly": "Malay (individual language)",
457
+ "muw": "Mundari",
458
+ "xst": "Silt'e",
459
+ "ope": "Old Persian",
460
+ "scc": "Serbian",
461
+ "scr": "Croatian",
462
+ "xsk": "Sakan",
463
+ "mol": "Moldavian",
464
+ "aay": "Aariya",
465
+ "acc": "Cubulco Achí",
466
+ "cbm": "Yepocapa Southwestern Cakchiquel",
467
+ "chs": "Chumash",
468
+ "ckc": "Northern Cakchiquel",
469
+ "ckd": "South Central Cakchiquel",
470
+ "cke": "Eastern Cakchiquel",
471
+ "ckf": "Southern Cakchiquel",
472
+ "cki": "Santa María De Jesús Cakchiquel",
473
+ "ckj": "Santo Domingo Xenacoj Cakchiquel",
474
+ "ckk": "Acatenango Southwestern Cakchiquel",
475
+ "ckw": "Western Cakchiquel",
476
+ "cnm": "Ixtatán Chuj",
477
+ "cti": "Tila Chol",
478
+ "cun": "Cunén Quiché",
479
+ "eml": "Emiliano-Romagnolo",
480
+ "eur": "Europanto",
481
+ "gmo": "Gamo-Gofa-Dawro",
482
+ "hsf": "Southeastern Huastec",
483
+ "hva": "San Luís Potosí Huastec",
484
+ "ixi": "Nebaj Ixil",
485
+ "ixj": "Chajul Ixil",
486
+ "jai": "Western Jacalteco",
487
+ "mms": "Southern Mam",
488
+ "mpf": "Tajumulco Mam",
489
+ "mtz": "Tacanec",
490
+ "mvc": "Central Mam",
491
+ "mvj": "Todos Santos Cuchumatán Mam",
492
+ "poa": "Eastern Pokomam",
493
+ "pob": "Western Pokomchí",
494
+ "pou": "Southern Pokomam",
495
+ "ppv": "Papavô",
496
+ "quj": "Joyabaj Quiché",
497
+ "qut": "West Central Quiché",
498
+ "quu": "Eastern Quiché",
499
+ "qxi": "San Andrés Quiché",
500
+ "sic": "Malinguat",
501
+ "stc": "Santa Cruz",
502
+ "tlz": "Toala'",
503
+ "tzb": "Bachajón Tzeltal",
504
+ "tzc": "Chamula Tzotzil",
505
+ "tze": "Chenalhó Tzotzil",
506
+ "tzs": "San Andrés Larrainzar Tzotzil",
507
+ "tzt": "Western Tzutujil",
508
+ "tzu": "Huixtán Tzotzil",
509
+ "tzz": "Zinacantán Tzotzil",
510
+ "vlr": "Vatrata",
511
+ "yus": "Chan Santa Cruz Maya",
512
+ "nfg": "Nyeng",
513
+ "nfk": "Shakara",
514
+ "agp": "Paranan",
515
+ "bhk": "Albay Bicolano",
516
+ "bkb": "Finallig",
517
+ "btb": "Beti (Cameroon)",
518
+ "cjr": "Chorotega",
519
+ "cmk": "Chimakum",
520
+ "drh": "Darkhat",
521
+ "drw": "Darwazi",
522
+ "gav": "Gabutamon",
523
+ "mof": "Mohegan-Montauk-Narragansett",
524
+ "mst": "Cataelano Mandaya",
525
+ "myt": "Sangab Mandaya",
526
+ "rmr": "Caló",
527
+ "sgl": "Sanglechi-Ishkashimi",
528
+ "sul": "Surigaonon",
529
+ "sum": "Sumo-Mayangna",
530
+ "tnf": "Tangshewi",
531
+ "wgw": "Wagawaga",
532
+ "ayx": "Ayi (China)",
533
+ "bjq": "Southern Betsimisaraka Malagasy",
534
+ "dha": "Dhanwar (India)",
535
+ "dkl": "Kolum So Dogon",
536
+ "mja": "Mahei",
537
+ "nbf": "Naxi",
538
+ "noo": "Nootka",
539
+ "tie": "Tingal",
540
+ "tkk": "Takpa",
541
+ "baz": "Tunen",
542
+ "bjd": "Bandjigali",
543
+ "ccq": "Chaungtha",
544
+ "cka": "Khumi Awa Chin",
545
+ "dap": "Nisi (India)",
546
+ "dwl": "Walo Kumbe Dogon",
547
+ "elp": "Elpaputih",
548
+ "gbc": "Garawa",
549
+ "gio": "Gelao",
550
+ "hrr": "Horuru",
551
+ "ibi": "Ibilo",
552
+ "jar": "Jarawa (Nigeria)",
553
+ "kdv": "Kado",
554
+ "kgh": "Upper Tanudan Kalinga",
555
+ "kpp": "Paku Karen",
556
+ "kzh": "Kenuzi-Dongola",
557
+ "lcq": "Luhu",
558
+ "mgx": "Omati",
559
+ "nln": "Durango Nahuatl",
560
+ "pbz": "Palu",
561
+ "pgy": "Pongyong",
562
+ "sca": "Sansu",
563
+ "tlw": "South Wemale",
564
+ "unp": "Worora",
565
+ "wiw": "Wirangu",
566
+ "ybd": "Yangbye",
567
+ "yen": "Yendang",
568
+ "yma": "Yamphe",
569
+ "daf": "Dan",
570
+ "djl": "Djiwarli",
571
+ "ggr": "Aghu Tharnggalu",
572
+ "ilw": "Talur",
573
+ "izi": "Izi-Ezaa-Ikwo-Mgbo",
574
+ "meg": "Mea",
575
+ "mld": "Malakhel",
576
+ "mnt": "Maykulan",
577
+ "mwd": "Mudbura",
578
+ "myq": "Forest Maninka",
579
+ "nbx": "Ngura",
580
+ "nlr": "Ngarla",
581
+ "pcr": "Panang",
582
+ "ppr": "Piru",
583
+ "tgg": "Tangga",
584
+ "wit": "Wintu",
585
+ "xia": "Xiandao",
586
+ "yiy": "Yir Yoront",
587
+ "yos": "Yos",
588
+ "emo": "Emok",
589
+ "ggm": "Gugu Mini",
590
+ "leg": "Lengua",
591
+ "lmm": "Lamam",
592
+ "mhh": "Maskoy Pidgin",
593
+ "puz": "Purum Naga",
594
+ "sap": "Sanapaná",
595
+ "yuu": "Yugh",
596
+ "aam": "Aramanik",
597
+ "adp": "Adap",
598
+ "aue": "ǂKxʼauǁʼein",
599
+ "bmy": "Bemba (Democratic Republic of Congo)",
600
+ "bxx": "Borna (Democratic Republic of Congo)",
601
+ "byy": "Buya",
602
+ "dzd": "Daza",
603
+ "gfx": "Mangetti Dune ǃXung",
604
+ "gti": "Gbati-ri",
605
+ "ime": "Imeraguen",
606
+ "kbf": "Kakauhua",
607
+ "koj": "Sara Dunjo",
608
+ "kwq": "Kwak",
609
+ "kxe": "Kakihum",
610
+ "lii": "Lingkhim",
611
+ "mwj": "Maligo",
612
+ "nnx": "Ngong",
613
+ "oun": "ǃOǃung",
614
+ "pmu": "Mirpur Panjabi",
615
+ "sgo": "Songa",
616
+ "thx": "The",
617
+ "tsf": "Southwestern Tamang",
618
+ "uok": "Uokha",
619
+ "xsj": "Subi",
620
+ "yds": "Yiddish Sign Language",
621
+ "ymt": "Mator-Taygi-Karagas",
622
+ "ynh": "Yangho",
623
+ "bgm": "Baga Mboteni",
624
+ "btl": "Bhatola",
625
+ "cbe": "Chipiajes",
626
+ "cbh": "Cagua",
627
+ "coy": "Coyaima",
628
+ "cqu": "Chilean Quechua",
629
+ "cum": "Cumeral",
630
+ "duj": "Dhuwal",
631
+ "ggn": "Eastern Gurung",
632
+ "ggo": "Southern Gondi",
633
+ "guv": "Gey",
634
+ "iap": "Iapama",
635
+ "ill": "Iranun",
636
+ "kgc": "Kasseng",
637
+ "kox": "Coxima",
638
+ "ktr": "Kota Marudu Tinagas",
639
+ "kvs": "Kunggara",
640
+ "kzj": "Coastal Kadazan",
641
+ "kzt": "Tambunan Dusun",
642
+ "nad": "Nijadali",
643
+ "nts": "Natagaimas",
644
+ "ome": "Omejes",
645
+ "pmc": "Palumata",
646
+ "pod": "Ponares",
647
+ "ppa": "Pao",
648
+ "pry": "Pray 3",
649
+ "rna": "Runa",
650
+ "svr": "Savara",
651
+ "tdu": "Tempasuk Dusun",
652
+ "thc": "Tai Hang Tong",
653
+ "tid": "Tidong",
654
+ "tmp": "Tai Mène",
655
+ "tne": "Tinoc Kallahan",
656
+ "toe": "Tomedes",
657
+ "xba": "Kamba (Brazil)",
658
+ "xbx": "Kabixí",
659
+ "xip": "Xipináwa",
660
+ "xkh": "Karahawyana",
661
+ "yri": "Yarí",
662
+ "jeg": "Jeng",
663
+ "kgd": "Kataang",
664
+ "krm": "Krim",
665
+ "prb": "Lua'",
666
+ "puk": "Pu Ko",
667
+ "rie": "Rien",
668
+ "rsi": "Rennellese Sign Language",
669
+ "skk": "Sok",
670
+ "snh": "Shinabo",
671
+ "lsg": "Lyons Sign Language",
672
+ "mwx": "Mediak",
673
+ "mwy": "Mosiro",
674
+ "ncp": "Ndaktup",
675
+ "ais": "Nataoran Amis",
676
+ "asd": "Asas",
677
+ "dit": "Dirari",
678
+ "dud": "Hun-Saare",
679
+ "lba": "Lui",
680
+ "llo": "Khlor",
681
+ "myd": "Maramba",
682
+ "myi": "Mina (India)",
683
+ "nns": "Ningye",
684
+ "aoh": "Arma",
685
+ "ayy": "Tayabas Ayta",
686
+ "bbz": "Babalia Creole Arabic",
687
+ "bpb": "Barbacoas",
688
+ "cca": "Cauca",
689
+ "cdg": "Chamari",
690
+ "dgu": "Degaru",
691
+ "drr": "Dororo",
692
+ "ekc": "Eastern Karnic",
693
+ "gli": "Guliguli",
694
+ "kjf": "Khalaj",
695
+ "kxl": "Nepali Kurux",
696
+ "kxu": "Kui (India)",
697
+ "lmz": "Lumbee",
698
+ "nxu": "Narau",
699
+ "plp": "Palpa",
700
+ "sdm": "Semandang",
701
+ "tbb": "Tapeba",
702
+ "xrq": "Karranga",
703
+ "xtz": "Tasmanian",
704
+ "zir": "Ziriya",
705
+ "thw": "Thudam",
706
+ "bic": "Bikaru",
707
+ "bij": "Vaghat-Ya-Bijim-Legeri",
708
+ "blg": "Balau",
709
+ "gji": "Geji",
710
+ "mvm": "Muya",
711
+ "ngo": "Ngoni",
712
+ "pat": "Papitalai",
713
+ "vki": "Ija-Zuba",
714
+ "wra": "Warapu",
715
+ "ajt": "Judeo-Tunisian Arabic",
716
+ "cug": "Chungmboko",
717
+ "lak": "Laka (Nigeria)",
718
+ "lno": "Lango (South Sudan)",
719
+ "pii": "Pini",
720
+ "smd": "Sama",
721
+ "snb": "Sebuyau",
722
+ "uun": "Kulon-Pazeh",
723
+ "wrd": "Warduji",
724
+ "wya": "Wyandot",
725
+ }
726
+
727
+
728
+ iso639long = inverse_dict(iso639short)
729
+
730
+ iso639code_retired = inverse_dict(iso639retired)
env-llmeval/lib/python3.10/site-packages/nltk/lazyimport.py ADDED
@@ -0,0 +1,142 @@
1
+ # This module is from mx/DateTime/LazyModule.py and is
2
+ # distributed under the terms of the eGenix.com Public License Agreement
3
+ # https://www.egenix.com/products/eGenix.com-Public-License-1.1.0.pdf
4
+
5
+ """ Helper to enable simple lazy module import.
6
+
7
+ 'Lazy' means the actual import is deferred until an attribute is
8
+ requested from the module's namespace. This has the advantage of
9
+ allowing all imports to be done at the top of a script (in a
10
+ prominent and visible place) without having a great impact
11
+ on startup time.
12
+
13
+ Copyright (c) 1999-2005, Marc-Andre Lemburg; mailto:[email protected]
14
+ See the documentation for further information on copyrights,
15
+ or contact the author. All Rights Reserved.
16
+ """
17
+
18
+ ### Constants
19
+
20
+ _debug = 0
21
+
22
+ ###
23
+
24
+
25
+ class LazyModule:
26
+
27
+ """Lazy module class.
28
+
29
+ Lazy modules are imported into the given namespaces whenever a
30
+ non-special attribute (there are some attributes like __doc__
31
+ that class instances handle without calling __getattr__) is
32
+ requested. The module is then registered under the given name
33
+ in locals usually replacing the import wrapper instance. The
34
+ import itself is done using globals as global namespace.
35
+
36
+ Example of creating a lazy load module:
37
+
38
+ ISO = LazyModule('ISO', locals(), globals())
39
+
40
+ Later, requesting an attribute from ISO will load the module
41
+ automatically into the locals() namespace, overriding the
42
+ LazyModule instance:
43
+
44
+ t = ISO.Week(1998,1,1)
45
+
46
+ """
47
+
48
+ # Flag which indicates whether the LazyModule is initialized or not
49
+ __lazymodule_init = 0
50
+
51
+ # Name of the module to load
52
+ __lazymodule_name = ""
53
+
54
+ # Flag which indicates whether the module was loaded or not
55
+ __lazymodule_loaded = 0
56
+
57
+ # Locals dictionary where to register the module
58
+ __lazymodule_locals = None
59
+
60
+ # Globals dictionary to use for the module import
61
+ __lazymodule_globals = None
62
+
63
+ def __init__(self, name, locals, globals=None):
64
+
65
+ """Create a LazyModule instance wrapping module name.
66
+
67
+ The module will later on be registered in locals under the
68
+ given module name.
69
+
70
+ globals is optional and defaults to locals.
71
+
72
+ """
73
+ self.__lazymodule_locals = locals
74
+ if globals is None:
75
+ globals = locals
76
+ self.__lazymodule_globals = globals
77
+ mainname = globals.get("__name__", "")
78
+ if mainname:
79
+ self.__name__ = mainname + "." + name
80
+ self.__lazymodule_name = name
81
+ else:
82
+ self.__name__ = self.__lazymodule_name = name
83
+ self.__lazymodule_init = 1
84
+
85
+ def __lazymodule_import(self):
86
+
87
+ """Import the module now."""
88
+ # Load and register module
89
+ local_name = self.__lazymodule_name # e.g. "toolbox"
90
+ full_name = self.__name__ # e.g. "nltk.toolbox"
91
+ if self.__lazymodule_loaded:
92
+ return self.__lazymodule_locals[local_name]
93
+ if _debug:
94
+ print("LazyModule: Loading module %r" % full_name)
95
+ self.__lazymodule_locals[local_name] = module = __import__(
96
+ full_name, self.__lazymodule_locals, self.__lazymodule_globals, "*"
97
+ )
98
+
99
+ # Fill namespace with all symbols from original module to
100
+ # provide faster access.
101
+ self.__dict__.update(module.__dict__)
102
+
103
+ # Set import flag
104
+ self.__dict__["__lazymodule_loaded"] = 1
105
+
106
+ if _debug:
107
+ print("LazyModule: Module %r loaded" % full_name)
108
+ return module
109
+
110
+ def __getattr__(self, name):
111
+
112
+ """Import the module on demand and get the attribute."""
113
+ if self.__lazymodule_loaded:
114
+ raise AttributeError(name)
115
+ if _debug:
116
+ print(
117
+ "LazyModule: "
118
+ "Module load triggered by attribute %r read access" % name
119
+ )
120
+ module = self.__lazymodule_import()
121
+ return getattr(module, name)
122
+
123
+ def __setattr__(self, name, value):
124
+
125
+ """Import the module on demand and set the attribute."""
126
+ if not self.__lazymodule_init:
127
+ self.__dict__[name] = value
128
+ return
129
+ if self.__lazymodule_loaded:
130
+ self.__lazymodule_locals[self.__lazymodule_name] = value
131
+ self.__dict__[name] = value
132
+ return
133
+ if _debug:
134
+ print(
135
+ "LazyModule: "
136
+ "Module load triggered by attribute %r write access" % name
137
+ )
138
+ module = self.__lazymodule_import()
139
+ setattr(module, name, value)
140
+
141
+ def __repr__(self):
142
+ return "<LazyModule '%s'>" % self.__name__
env-llmeval/lib/python3.10/site-packages/nltk/lm/__init__.py ADDED
@@ -0,0 +1,235 @@
1
+ # Natural Language Toolkit: Language Models
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Authors: Ilia Kurenkov <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ """
8
+ NLTK Language Modeling Module.
9
+ ------------------------------
10
+
11
+ Currently this module covers only ngram language models, but it should be easy
12
+ to extend to neural models.
13
+
14
+
15
+ Preparing Data
16
+ ==============
17
+
18
+ Before we train our ngram models it is necessary to make sure the data we put in
19
+ them is in the right format.
20
+ Let's say we have a text that is a list of sentences, where each sentence is
21
+ a list of strings. For simplicity we just consider a text consisting of
22
+ characters instead of words.
23
+
24
+ >>> text = [['a', 'b', 'c'], ['a', 'c', 'd', 'c', 'e', 'f']]
25
+
26
+ If we want to train a bigram model, we need to turn this text into bigrams.
27
+ Here's what the first sentence of our text would look like if we use a function
28
+ from NLTK for this.
29
+
30
+ >>> from nltk.util import bigrams
31
+ >>> list(bigrams(text[0]))
32
+ [('a', 'b'), ('b', 'c')]
33
+
34
+ Notice how "b" occurs both as the first and second member of different bigrams
35
+ but "a" and "c" don't? Wouldn't it be nice to somehow indicate how often sentences
36
+ start with "a" and end with "c"?
37
+ A standard way to deal with this is to add special "padding" symbols to the
38
+ sentence before splitting it into ngrams.
39
+ Fortunately, NLTK also has a function for that, let's see what it does to the
40
+ first sentence.
41
+
42
+ >>> from nltk.util import pad_sequence
43
+ >>> list(pad_sequence(text[0],
44
+ ... pad_left=True,
45
+ ... left_pad_symbol="<s>",
46
+ ... pad_right=True,
47
+ ... right_pad_symbol="</s>",
48
+ ... n=2))
49
+ ['<s>', 'a', 'b', 'c', '</s>']
50
+
51
+ Note the `n` argument, that tells the function we need padding for bigrams.
52
+ Now, passing all these parameters every time is tedious and in most cases they
53
+ can be safely assumed as defaults anyway.
54
+ Thus our module provides a convenience function that has all these arguments
55
+ already set while the other arguments remain the same as for `pad_sequence`.
56
+
57
+ >>> from nltk.lm.preprocessing import pad_both_ends
58
+ >>> list(pad_both_ends(text[0], n=2))
59
+ ['<s>', 'a', 'b', 'c', '</s>']
60
+
61
+ Combining the two parts discussed so far we get the following preparation steps
62
+ for one sentence.
63
+
64
+ >>> list(bigrams(pad_both_ends(text[0], n=2)))
65
+ [('<s>', 'a'), ('a', 'b'), ('b', 'c'), ('c', '</s>')]
66
+
67
+ To make our model more robust we could also train it on unigrams (single words)
68
+ as well as bigrams, its main source of information.
69
+ NLTK once again helpfully provides a function called `everygrams`.
70
+ While not the most efficient, it is conceptually simple.
71
+
72
+
73
+ >>> from nltk.util import everygrams
74
+ >>> padded_bigrams = list(pad_both_ends(text[0], n=2))
75
+ >>> list(everygrams(padded_bigrams, max_len=2))
76
+ [('<s>',), ('<s>', 'a'), ('a',), ('a', 'b'), ('b',), ('b', 'c'), ('c',), ('c', '</s>'), ('</s>',)]
77
+
78
+ We are almost ready to start counting ngrams, just one more step left.
79
+ During training and evaluation our model will rely on a vocabulary that
80
+ defines which words are "known" to the model.
81
+ To create this vocabulary we need to pad our sentences (just like for counting
82
+ ngrams) and then combine the sentences into one flat stream of words.
83
+
84
+ >>> from nltk.lm.preprocessing import flatten
85
+ >>> list(flatten(pad_both_ends(sent, n=2) for sent in text))
86
+ ['<s>', 'a', 'b', 'c', '</s>', '<s>', 'a', 'c', 'd', 'c', 'e', 'f', '</s>']
87
+
88
+ In most cases we want to use the same text as the source for both vocabulary
89
+ and ngram counts.
90
+ Now that we understand what this means for our preprocessing, we can simply import
91
+ a function that does everything for us.
92
+
93
+ >>> from nltk.lm.preprocessing import padded_everygram_pipeline
94
+ >>> train, vocab = padded_everygram_pipeline(2, text)
95
+
96
+ So as to avoid re-creating the text in memory, both `train` and `vocab` are lazy
97
+ iterators. They are evaluated on demand at training time.
98
+
99
+
100
+ Training
101
+ ========
102
+ Having prepared our data we are ready to start training a model.
103
+ As a simple example, let us train a Maximum Likelihood Estimator (MLE).
104
+ We only need to specify the highest ngram order to instantiate it.
105
+
106
+ >>> from nltk.lm import MLE
107
+ >>> lm = MLE(2)
108
+
109
+ This automatically creates an empty vocabulary...
110
+
111
+ >>> len(lm.vocab)
112
+ 0
113
+
114
+ ... which gets filled as we fit the model.
115
+
116
+ >>> lm.fit(train, vocab)
117
+ >>> print(lm.vocab)
118
+ <Vocabulary with cutoff=1 unk_label='<UNK>' and 9 items>
119
+ >>> len(lm.vocab)
120
+ 9
121
+
122
+ The vocabulary helps us handle words that have not occurred during training.
123
+
124
+ >>> lm.vocab.lookup(text[0])
125
+ ('a', 'b', 'c')
126
+ >>> lm.vocab.lookup(["aliens", "from", "Mars"])
127
+ ('<UNK>', '<UNK>', '<UNK>')
128
+
129
+ Moreover, in some cases we want to ignore words that we did see during training
130
+ but that didn't occur frequently enough to provide us with useful information.
131
+ You can tell the vocabulary to ignore such words.
132
+ To find out how that works, check out the docs for the `Vocabulary` class.
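+ As a hedged illustration (`unk_cutoff` is the relevant parameter), words
+ seen fewer than `unk_cutoff` times are left out of the vocabulary:
+
+ >>> from nltk.lm import Vocabulary
+ >>> vocab = Vocabulary(["a", "a", "b"], unk_cutoff=2)
+ >>> sorted(vocab)
+ ['<UNK>', 'a']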
133
+
134
+
135
+ Using a Trained Model
136
+ =====================
137
+ When it comes to ngram models the training boils down to counting up the ngrams
138
+ from the training corpus.
139
+
140
+ >>> print(lm.counts)
141
+ <NgramCounter with 2 ngram orders and 24 ngrams>
142
+
143
+ This provides a convenient interface to access counts for unigrams...
144
+
145
+ >>> lm.counts['a']
146
+ 2
147
+
148
+ ...and bigrams (in this case "a b")
149
+
150
+ >>> lm.counts[['a']]['b']
151
+ 1
152
+
153
+ And so on. However, the real purpose of training a language model is to have it
154
+ score how probable words are in certain contexts.
155
+ This being MLE, the model returns the item's relative frequency as its score.
156
+
157
+ >>> lm.score("a")
158
+ 0.15384615384615385
159
+
160
+ Items that are not seen during training are mapped to the vocabulary's
161
+ "unknown label" token. This is "<UNK>" by default.
162
+
163
+ >>> lm.score("<UNK>") == lm.score("aliens")
164
+ True
165
+
166
+ Here's how you get the score for a word given some preceding context.
167
+ For example we want to know what is the chance that "b" is preceded by "a".
168
+
169
+ >>> lm.score("b", ["a"])
170
+ 0.5
171
+
172
+ To avoid underflow when working with many small score values it makes sense to
173
+ take their logarithm.
174
+ For convenience this can be done with the `logscore` method.
175
+
176
+ >>> lm.logscore("a")
177
+ -2.700439718141092
178
+
179
+ Building on this method, we can also evaluate our model's cross-entropy and
180
+ perplexity with respect to sequences of ngrams.
181
+
182
+ >>> test = [('a', 'b'), ('c', 'd')]
183
+ >>> lm.entropy(test)
184
+ 1.292481250360578
185
+ >>> lm.perplexity(test)
186
+ 2.449489742783178
187
+
188
+ It is advisable to preprocess your test text exactly the same way as you did
189
+ the training text.
+
+ One cool feature of ngram models is that they can be used to generate text.
+
+ >>> lm.generate(1, random_seed=3)
+ '<s>'
+ >>> lm.generate(5, random_seed=3)
+ ['<s>', 'a', 'b', 'c', 'd']
+
+ Provide `random_seed` if you want to consistently reproduce the same text, all
+ other things being equal. Here we are using it to test the examples.
+
+ You can also condition your generation on some preceding text with the
+ `text_seed` argument.
+
+ >>> lm.generate(5, text_seed=['c'], random_seed=3)
+ ['</s>', 'c', 'd', 'c', 'd']
+
+ Note that an ngram model is restricted in how much preceding context it can
+ take into account. For example, a trigram model can only condition its output
+ on 2 preceding words. If you pass in a 4-word context, the first two words
+ will be ignored.
+ """
+
+ from nltk.lm.counter import NgramCounter
+ from nltk.lm.models import (
+     MLE,
+     AbsoluteDiscountingInterpolated,
+     KneserNeyInterpolated,
+     Laplace,
+     Lidstone,
+     StupidBackoff,
+     WittenBellInterpolated,
+ )
+ from nltk.lm.vocabulary import Vocabulary
+
+ __all__ = [
+     "Vocabulary",
+     "NgramCounter",
+     "MLE",
+     "Lidstone",
+     "Laplace",
+     "WittenBellInterpolated",
+     "KneserNeyInterpolated",
+     "AbsoluteDiscountingInterpolated",
+     "StupidBackoff",
+ ]
env-llmeval/lib/python3.10/site-packages/nltk/lm/api.py ADDED
@@ -0,0 +1,235 @@
+ # Natural Language Toolkit: Language Models
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Authors: Ilia Kurenkov <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+ """Language Model Interface."""
+
+ import random
+ import warnings
+ from abc import ABCMeta, abstractmethod
+ from bisect import bisect
+ from itertools import accumulate
+
+ from nltk.lm.counter import NgramCounter
+ from nltk.lm.util import log_base2
+ from nltk.lm.vocabulary import Vocabulary
+
+
+ class Smoothing(metaclass=ABCMeta):
+     """Ngram Smoothing Interface
+
+     Implements Chen & Goodman 1995's idea that all smoothing algorithms have
+     certain features in common. This should ideally allow smoothing algorithms to
+     work both with Backoff and Interpolation.
+     """
+
+     def __init__(self, vocabulary, counter):
+         """
+         :param vocabulary: The Ngram vocabulary object.
+         :type vocabulary: nltk.lm.vocabulary.Vocabulary
+         :param counter: The counts of the vocabulary items.
+         :type counter: nltk.lm.counter.NgramCounter
+         """
+         self.vocab = vocabulary
+         self.counts = counter
+
+     @abstractmethod
+     def unigram_score(self, word):
+         raise NotImplementedError()
+
+     @abstractmethod
+     def alpha_gamma(self, word, context):
+         raise NotImplementedError()
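+
+
+ # A minimal illustrative subclass -- a sketch, not part of NLTK -- showing how
+ # the interface is meant to be filled in. A gamma of 0 disables interpolation,
+ # so alpha is just the raw relative frequency for the full context:
+ #
+ # class NoDiscounting(Smoothing):
+ #     def unigram_score(self, word):
+ #         return self.counts.unigrams.freq(word)
+ #
+ #     def alpha_gamma(self, word, context):
+ #         return self.counts[len(context) + 1][context].freq(word), 0.0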
+
+
+ def _mean(items):
+     """Return average (aka mean) for sequence of items."""
+     return sum(items) / len(items)
+
+
+ def _random_generator(seed_or_generator):
+     if isinstance(seed_or_generator, random.Random):
+         return seed_or_generator
+     return random.Random(seed_or_generator)
+
+
+ def _weighted_choice(population, weights, random_generator=None):
+     """Like random.choice, but with weights.
+
+     Heavily inspired by python 3.6 `random.choices`.
+     """
+     if not population:
+         raise ValueError("Can't choose from empty population")
+     if len(population) != len(weights):
+         raise ValueError("The number of weights does not match the population")
+     cum_weights = list(accumulate(weights))
+     total = cum_weights[-1]
+     threshold = random_generator.random()
+     return population[bisect(cum_weights, total * threshold)]
+
+
+ class LanguageModel(metaclass=ABCMeta):
+     """ABC for Language Models.
+
+     Cannot be directly instantiated itself.
+
+     """
+
+     def __init__(self, order, vocabulary=None, counter=None):
+         """Creates new LanguageModel.
+
+         :param int order: Highest ngram order of the model.
+         :param vocabulary: If provided, this vocabulary will be used instead
+             of creating a new one when training.
+         :type vocabulary: `nltk.lm.Vocabulary` or None
+         :param counter: If provided, use this object to count ngrams.
+         :type counter: `nltk.lm.NgramCounter` or None
+         """
+         self.order = order
+         if vocabulary and not isinstance(vocabulary, Vocabulary):
+             warnings.warn(
+                 f"The `vocabulary` argument passed to {self.__class__.__name__!r} "
+                 "must be an instance of `nltk.lm.Vocabulary`.",
+                 stacklevel=3,
+             )
+         self.vocab = Vocabulary() if vocabulary is None else vocabulary
+         self.counts = NgramCounter() if counter is None else counter
+
+     def fit(self, text, vocabulary_text=None):
+         """Trains the model on a text.
+
+         :param text: Training text as a sequence of sentences.
+
+         """
+         if not self.vocab:
+             if vocabulary_text is None:
+                 raise ValueError(
+                     "Cannot fit without a vocabulary or text to create it from."
+                 )
+             self.vocab.update(vocabulary_text)
+         self.counts.update(self.vocab.lookup(sent) for sent in text)
+
+     def score(self, word, context=None):
+         """Masks out of vocab (OOV) words and computes their model score.
+
+         For model-specific logic of calculating scores, see the `unmasked_score`
+         method.
+         """
+         return self.unmasked_score(
+             self.vocab.lookup(word), self.vocab.lookup(context) if context else None
+         )
+
+     @abstractmethod
+     def unmasked_score(self, word, context=None):
+         """Score a word given some optional context.
+
+         Concrete models are expected to provide an implementation.
+         Note that this method does not mask its arguments with the OOV label.
+         Use the `score` method for that.
+
+         :param str word: Word for which we want the score
+         :param context: Context the word is in.
+             If `None`, compute unigram score.
+         :type context: tuple(str) or None
+         :rtype: float
+         """
+         raise NotImplementedError()
+
+     def logscore(self, word, context=None):
+         """Evaluate the log score of this word in this context.
+
+         The arguments are the same as for `score` and `unmasked_score`.
+
+         """
+         return log_base2(self.score(word, context))
+
+     def context_counts(self, context):
+         """Helper method for retrieving counts for a given context.
+
+         Assumes context has been checked and oov words in it masked.
+         :type context: tuple(str) or None
+
+         """
+         return (
+             self.counts[len(context) + 1][context] if context else self.counts.unigrams
+         )
+
+     def entropy(self, text_ngrams):
+         """Calculate cross-entropy of model for given evaluation text.
+
+         :param Iterable(tuple(str)) text_ngrams: A sequence of ngram tuples.
+         :rtype: float
+
+         """
+         return -1 * _mean(
+             [self.logscore(ngram[-1], ngram[:-1]) for ngram in text_ngrams]
+         )
+
+     def perplexity(self, text_ngrams):
+         """Calculates the perplexity of the given text.
+
+         This is simply 2 ** cross-entropy for the text, so the arguments are the same.
+
+         """
+         return pow(2.0, self.entropy(text_ngrams))
+
+     def generate(self, num_words=1, text_seed=None, random_seed=None):
+         """Generate words from the model.
+
+         :param int num_words: How many words to generate. By default 1.
+         :param text_seed: Generation can be conditioned on preceding context.
+         :param random_seed: A random seed or an instance of `random.Random`. If provided,
+             makes the random sampling part of generation reproducible.
+         :return: One (str) word or a list of words generated from model.
+
+         Examples:
+
+         >>> from nltk.lm import MLE
+         >>> lm = MLE(2)
+         >>> lm.fit([[("a", "b"), ("b", "c")]], vocabulary_text=['a', 'b', 'c'])
+         >>> lm.fit([[("a",), ("b",), ("c",)]])
+         >>> lm.generate(random_seed=3)
+         'a'
+         >>> lm.generate(text_seed=['a'])
+         'b'
+
+         """
+         text_seed = [] if text_seed is None else list(text_seed)
+         random_generator = _random_generator(random_seed)
+         # This is the base recursion case.
+         if num_words == 1:
+             context = (
+                 text_seed[-self.order + 1 :]
+                 if len(text_seed) >= self.order
+                 else text_seed
+             )
+             samples = self.context_counts(self.vocab.lookup(context))
+             while context and not samples:
+                 context = context[1:] if len(context) > 1 else []
+                 samples = self.context_counts(self.vocab.lookup(context))
+             # Sorting samples achieves two things:
+             # - reproducible randomness when sampling
+             # - turns Mapping into Sequence which `_weighted_choice` expects
+             samples = sorted(samples)
+             return _weighted_choice(
+                 samples,
+                 tuple(self.score(w, context) for w in samples),
+                 random_generator,
+             )
+         # We build up text one word at a time using the preceding context.
+         generated = []
+         for _ in range(num_words):
+             generated.append(
+                 self.generate(
+                     num_words=1,
+                     text_seed=text_seed + generated,
+                     random_seed=random_generator,
+                 )
+             )
+         return generated
env-llmeval/lib/python3.10/site-packages/nltk/lm/counter.py ADDED
@@ -0,0 +1,163 @@
+ # Natural Language Toolkit
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Ilia Kurenkov <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+ """
+ Language Model Counter
+ ----------------------
+ """
+
+ from collections import defaultdict
+ from collections.abc import Sequence
+
+ from nltk.probability import ConditionalFreqDist, FreqDist
+
+
+ class NgramCounter:
+     """Class for counting ngrams.
+
+     Will count any ngram sequence you give it ;)
+
+     First we need to make sure we are feeding the counter sentences of ngrams.
+
+     >>> text = [["a", "b", "c", "d"], ["a", "c", "d", "c"]]
+     >>> from nltk.util import ngrams
+     >>> text_bigrams = [ngrams(sent, 2) for sent in text]
+     >>> text_unigrams = [ngrams(sent, 1) for sent in text]
+
+     The counting itself is very simple.
+
+     >>> from nltk.lm import NgramCounter
+     >>> ngram_counts = NgramCounter(text_bigrams + text_unigrams)
+
+     You can conveniently access ngram counts using standard Python dictionary notation.
+     String keys will give you unigram counts.
+
+     >>> ngram_counts['a']
+     2
+     >>> ngram_counts['aliens']
+     0
+
+     If you want to access counts for higher order ngrams, use a list or a tuple.
+     These are treated as "context" keys, so what you get is a frequency distribution
+     over all continuations after the given context.
+
+     >>> sorted(ngram_counts[['a']].items())
+     [('b', 1), ('c', 1)]
+     >>> sorted(ngram_counts[('a',)].items())
+     [('b', 1), ('c', 1)]
+
+     This is equivalent to specifying explicitly the order of the ngram (in this case
+     2 for bigram) and indexing on the context.
+
+     >>> ngram_counts[2][('a',)] is ngram_counts[['a']]
+     True
+
+     Note that the keys in `ConditionalFreqDist` cannot be lists, only tuples!
+     It is generally advisable to use the less verbose and more flexible square
+     bracket notation.
+
+     To get the count of the full ngram "a b", do this:
+
+     >>> ngram_counts[['a']]['b']
+     1
+
+     Specifying the ngram order as a number can be useful for accessing all ngrams
+     in that order.
+
+     >>> ngram_counts[2]
+     <ConditionalFreqDist with 4 conditions>
+
+     The keys of this `ConditionalFreqDist` are the contexts we discussed earlier.
+     Unigrams can also be accessed with a human-friendly alias.
+
+     >>> ngram_counts.unigrams is ngram_counts[1]
+     True
+
+     Similarly to `collections.Counter`, you can update counts after initialization.
+
+     >>> ngram_counts['e']
+     0
+     >>> ngram_counts.update([ngrams(["d", "e", "f"], 1)])
+     >>> ngram_counts['e']
+     1
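+
+     The counter also supports `len` and membership checks over the ngram orders
+     it has stored so far.
+
+     >>> len(ngram_counts)
+     2
+     >>> 2 in ngram_counts
+     True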
+
+     """
+
+     def __init__(self, ngram_text=None):
+         """Creates a new NgramCounter.
+
+         If `ngram_text` is specified, counts ngrams from it, otherwise waits for
+         `update` method to be called explicitly.
+
+         :param ngram_text: Optional text containing sentences of ngrams, as for `update` method.
+         :type ngram_text: Iterable(Iterable(tuple(str))) or None
+
+         """
+         self._counts = defaultdict(ConditionalFreqDist)
+         self._counts[1] = self.unigrams = FreqDist()
+
+         if ngram_text:
+             self.update(ngram_text)
+
+     def update(self, ngram_text):
+         """Updates ngram counts from `ngram_text`.
+
+         Expects `ngram_text` to be a sequence of sentences (sequences).
+         Each sentence consists of ngrams as tuples of strings.
+
+         :param Iterable(Iterable(tuple(str))) ngram_text: Text containing sentences of ngrams.
+         :raises TypeError: if the ngrams are not tuples.
+
+         """
+
+         for sent in ngram_text:
+             for ngram in sent:
+                 if not isinstance(ngram, tuple):
+                     raise TypeError(
+                         "Ngram <{}> isn't a tuple, but {}".format(ngram, type(ngram))
+                     )
+
+                 ngram_order = len(ngram)
+                 if ngram_order == 1:
+                     self.unigrams[ngram[0]] += 1
+                     continue
+
+                 context, word = ngram[:-1], ngram[-1]
+                 self[ngram_order][context][word] += 1
+
+     def N(self):
+         """Returns grand total number of ngrams stored.
+
+         This includes ngrams from all orders, so some duplication is expected.
+         :rtype: int
+
+         >>> from nltk.lm import NgramCounter
+         >>> counts = NgramCounter([[("a", "b"), ("c",), ("d", "e")]])
+         >>> counts.N()
+         3
+
+         """
+         return sum(val.N() for val in self._counts.values())
+
+     def __getitem__(self, item):
+         """User-friendly access to ngram counts."""
+         if isinstance(item, int):
+             return self._counts[item]
+         elif isinstance(item, str):
+             return self._counts.__getitem__(1)[item]
+         elif isinstance(item, Sequence):
+             return self._counts.__getitem__(len(item) + 1)[tuple(item)]
+
+     def __str__(self):
+         return "<{} with {} ngram orders and {} ngrams>".format(
+             self.__class__.__name__, len(self._counts), self.N()
+         )
+
+     def __len__(self):
+         return self._counts.__len__()
+
+     def __contains__(self, item):
+         return item in self._counts
env-llmeval/lib/python3.10/site-packages/nltk/lm/models.py ADDED
@@ -0,0 +1,141 @@
+ # Natural Language Toolkit: Language Models
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Ilia Kurenkov <[email protected]>
+ #         Manu Joseph <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+ """Language Models"""
+
+ from nltk.lm.api import LanguageModel, Smoothing
+ from nltk.lm.smoothing import AbsoluteDiscounting, KneserNey, WittenBell
+
+
+ class MLE(LanguageModel):
+     """Class for providing MLE ngram model scores.
+
+     Inherits initialization from `LanguageModel`.
+     """
+
+     def unmasked_score(self, word, context=None):
+         """Returns the MLE score for a word given a context.
+
+         Args:
+         - word is expected to be a string
+         - context is expected to be something reasonably convertible to a tuple
+         """
+         return self.context_counts(context).freq(word)
+
+
+ class Lidstone(LanguageModel):
+     """Provides Lidstone-smoothed scores.
+
+     In addition to initialization arguments from `LanguageModel`, also requires
+     a number by which to increase the counts, gamma.
+     """
+
+     def __init__(self, gamma, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.gamma = gamma
+
+     def unmasked_score(self, word, context=None):
+         """Additive smoothing: Lidstone or Laplace.
+
+         To see which one, look at the `gamma` attribute on the class.
+
+         """
+         counts = self.context_counts(context)
+         word_count = counts[word]
+         norm_count = counts.N()
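+         # Additive smoothing, i.e.
+         #     P(w | context) = (count(w, context) + gamma) / (N + gamma * V)
+         # where N is the total count of the context and V = len(self.vocab).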
+         return (word_count + self.gamma) / (norm_count + len(self.vocab) * self.gamma)
+
+
+ class Laplace(Lidstone):
+     """Implements Laplace (add one) smoothing.
+
+     Initialization identical to `LanguageModel` because gamma is always 1.
+     """
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(1, *args, **kwargs)
+
+
+ class StupidBackoff(LanguageModel):
+     """Provides StupidBackoff scores.
+
+     In addition to initialization arguments from `LanguageModel`, also requires
+     a parameter alpha with which we scale the lower order probabilities.
+     Note that this is not a true probability distribution as scores for ngrams
+     of the same order do not sum up to unity.
+     """
+
+     def __init__(self, alpha=0.4, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.alpha = alpha
+
+     def unmasked_score(self, word, context=None):
+         if not context:
+             # Base recursion
+             return self.counts.unigrams.freq(word)
+         counts = self.context_counts(context)
+         word_count = counts[word]
+         norm_count = counts.N()
+         if word_count > 0:
+             return word_count / norm_count
+         else:
+             return self.alpha * self.unmasked_score(word, context[1:])
+
+
+ class InterpolatedLanguageModel(LanguageModel):
+     """Logic common to all interpolated language models.
+
+     The idea to abstract this comes from Chen & Goodman 1995.
+     Do not instantiate this class directly!
+     """
+
+     def __init__(self, smoothing_cls, order, **kwargs):
+         params = kwargs.pop("params", {})
+         super().__init__(order, **kwargs)
+         self.estimator = smoothing_cls(self.vocab, self.counts, **params)
+
+     def unmasked_score(self, word, context=None):
+         if not context:
+             # The base recursion case: no context, we only have a unigram.
+             return self.estimator.unigram_score(word)
+         if not self.counts[context]:
+             # It can also happen that we have no data for this context.
+             # In that case we defer to the lower-order ngram.
+             # This is the same as setting alpha to 0 and gamma to 1.
+             alpha, gamma = 0, 1
+         else:
+             alpha, gamma = self.estimator.alpha_gamma(word, context)
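+         # Chen & Goodman-style interpolation recursion:
+         #     P(w | context) = alpha(w, context) + gamma(context) * P(w | shortened context)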
+         return alpha + gamma * self.unmasked_score(word, context[1:])
+
+
+ class WittenBellInterpolated(InterpolatedLanguageModel):
+     """Interpolated version of Witten-Bell smoothing."""
+
+     def __init__(self, order, **kwargs):
+         super().__init__(WittenBell, order, **kwargs)
+
+
+ class AbsoluteDiscountingInterpolated(InterpolatedLanguageModel):
+     """Interpolated version of smoothing with absolute discount."""
+
+     def __init__(self, order, discount=0.75, **kwargs):
+         super().__init__(
+             AbsoluteDiscounting, order, params={"discount": discount}, **kwargs
+         )
+
+
+ class KneserNeyInterpolated(InterpolatedLanguageModel):
+     """Interpolated version of Kneser-Ney smoothing."""
+
+     def __init__(self, order, discount=0.1, **kwargs):
+         if not (0 <= discount <= 1):
+             raise ValueError(
+                 "Discount must be between 0 and 1 for probabilities to sum to unity."
+             )
+         super().__init__(
+             KneserNey, order, params={"discount": discount, "order": order}, **kwargs
+         )
env-llmeval/lib/python3.10/site-packages/nltk/lm/preprocessing.py ADDED
@@ -0,0 +1,51 @@
+ # Natural Language Toolkit: Language Model Preprocessing
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Ilia Kurenkov <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+ from functools import partial
+ from itertools import chain
+
+ from nltk.util import everygrams, pad_sequence
+
+ flatten = chain.from_iterable
+ pad_both_ends = partial(
+     pad_sequence,
+     pad_left=True,
+     left_pad_symbol="<s>",
+     pad_right=True,
+     right_pad_symbol="</s>",
+ )
+ pad_both_ends.__doc__ = """Pads both ends of a sentence to the length specified by the ngram order.
+
+ Following convention, <s> pads the start of the sentence and </s> pads its end.
+ """
+
+
+ def padded_everygrams(order, sentence):
+     """Helper with some useful defaults.
+
+     Applies pad_both_ends to sentence and follows it up with everygrams.
+     """
+     return everygrams(list(pad_both_ends(sentence, n=order)), max_len=order)
+
+
+ def padded_everygram_pipeline(order, text):
+     """Default preprocessing for a sequence of sentences.
+
+     Creates two iterators:
+
+     - sentences padded and turned into sequences of `nltk.util.everygrams`
+     - sentences padded as above and chained together for a flat stream of words
+
+     :param order: Largest ngram length produced by `everygrams`.
+     :param text: Text to iterate over. Expected to be an iterable of sentences.
+     :type text: Iterable[Iterable[str]]
+     :return: iterator over text as ngrams, iterator over text as vocabulary data
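+
+     A small sketch of typical use (materializing the lazy vocabulary stream just
+     to inspect it; in real use both iterators are passed straight to `fit`):
+
+     >>> train, vocab = padded_everygram_pipeline(2, [["a", "b"]])
+     >>> sorted(set(vocab))
+     ['</s>', '<s>', 'a', 'b']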
+     """
+     padding_fn = partial(pad_both_ends, n=order)
+     return (
+         (everygrams(list(padding_fn(sent)), max_len=order) for sent in text),
+         flatten(map(padding_fn, text)),
+     )
env-llmeval/lib/python3.10/site-packages/nltk/lm/util.py ADDED
@@ -0,0 +1,19 @@
+ # Natural Language Toolkit
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Ilia Kurenkov <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+ """Language Model Utilities"""
+
+ from math import log
+
+ NEG_INF = float("-inf")
+ POS_INF = float("inf")
+
+
+ def log_base2(score):
+     """Convenience function for computing logarithms with base 2.
+     if score == 0.0:
+         return NEG_INF
+     return log(score, 2)
env-llmeval/lib/python3.10/site-packages/nltk/lm/vocabulary.py ADDED
@@ -0,0 +1,218 @@
+ # Natural Language Toolkit
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Ilia Kurenkov <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+ """Language Model Vocabulary"""
+
+ import sys
+ from collections import Counter
+ from collections.abc import Iterable
+ from functools import singledispatch
+ from itertools import chain
+
+
+ @singledispatch
+ def _dispatched_lookup(words, vocab):
+     raise TypeError(f"Unsupported type for looking up in vocabulary: {type(words)}")
+
+
+ @_dispatched_lookup.register(Iterable)
+ def _(words, vocab):
+     """Look up a sequence of words in the vocabulary.
+
+     Returns a tuple of the looked up words.
+
+     """
+     return tuple(_dispatched_lookup(w, vocab) for w in words)
+
+
+ @_dispatched_lookup.register(str)
+ def _string_lookup(word, vocab):
+     """Looks up one word in the vocabulary."""
+     return word if word in vocab else vocab.unk_label
+
+
+ class Vocabulary:
+     """Stores language model vocabulary.
+
+     Satisfies two common language modeling requirements for a vocabulary:
+
+     - When checking membership and calculating its size, filters items
+       by comparing their counts to a cutoff value.
+     - Adds a special "unknown" token which unseen words are mapped to.
+
+     >>> words = ['a', 'c', '-', 'd', 'c', 'a', 'b', 'r', 'a', 'c', 'd']
+     >>> from nltk.lm import Vocabulary
+     >>> vocab = Vocabulary(words, unk_cutoff=2)
+
+     Tokens with counts greater than or equal to the cutoff value will
+     be considered part of the vocabulary.
+
+     >>> vocab['c']
+     3
+     >>> 'c' in vocab
+     True
+     >>> vocab['d']
+     2
+     >>> 'd' in vocab
+     True
+
+     Tokens with frequency counts less than the cutoff value will be considered not
+     part of the vocabulary even though their entries in the count dictionary are
+     preserved.
+
+     >>> vocab['b']
+     1
+     >>> 'b' in vocab
+     False
+     >>> vocab['aliens']
+     0
+     >>> 'aliens' in vocab
+     False
+
+     Keeping the count entries for seen words allows us to change the cutoff value
+     without having to recalculate the counts.
+
+     >>> vocab2 = Vocabulary(vocab.counts, unk_cutoff=1)
+     >>> "b" in vocab2
+     True
+
+     The cutoff value influences not only membership checking but also the result of
+     getting the size of the vocabulary using the built-in `len`.
+     Note that while the number of keys in the vocabulary's counter stays the same,
+     the items in the vocabulary differ depending on the cutoff.
+     We use `sorted` to demonstrate because it keeps the order consistent.
+
+     >>> sorted(vocab2.counts)
+     ['-', 'a', 'b', 'c', 'd', 'r']
+     >>> sorted(vocab2)
+     ['-', '<UNK>', 'a', 'b', 'c', 'd', 'r']
+     >>> sorted(vocab.counts)
+     ['-', 'a', 'b', 'c', 'd', 'r']
+     >>> sorted(vocab)
+     ['<UNK>', 'a', 'c', 'd']
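+
+     The built-in `len` reflects the same cutoff-based filtering:
+
+     >>> len(vocab)
+     4
+     >>> len(vocab2)
+     7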
+
+     In addition to items it gets populated with, the vocabulary stores a special
+     token that stands in for so-called "unknown" items. By default it's "<UNK>".
+
+     >>> "<UNK>" in vocab
+     True
+
+     We can look up words in a vocabulary using its `lookup` method.
+     "Unseen" words (with counts less than cutoff) are looked up as the unknown label.
+     If given one word (a string) as an input, this method will return a string.
+
+     >>> vocab.lookup("a")
+     'a'
+     >>> vocab.lookup("aliens")
+     '<UNK>'
+
+     If given a sequence, it will return a tuple of the looked up words.
+
+     >>> vocab.lookup(["p", 'a', 'r', 'd', 'b', 'c'])
+     ('<UNK>', 'a', '<UNK>', 'd', '<UNK>', 'c')
+
+     It's possible to update the counts after the vocabulary has been created.
+     In general, the interface is the same as that of `collections.Counter`.
+
+     >>> vocab['b']
+     1
+     >>> vocab.update(["b", "b", "c"])
+     >>> vocab['b']
+     3
+     """
+
+     def __init__(self, counts=None, unk_cutoff=1, unk_label="<UNK>"):
+         """Create a new Vocabulary.
+
+         :param counts: Optional iterable or `collections.Counter` instance to
+             pre-seed the Vocabulary. In case it is iterable, counts
+             are calculated.
+         :param int unk_cutoff: Words that occur less frequently than this value
+             are not considered part of the vocabulary.
+         :param unk_label: Label for marking words not part of vocabulary.
+
+         """
+         self.unk_label = unk_label
+         if unk_cutoff < 1:
+             raise ValueError(f"Cutoff value cannot be less than 1. Got: {unk_cutoff}")
+         self._cutoff = unk_cutoff
+
+         self.counts = Counter()
+         self.update(counts if counts is not None else "")
+
+     @property
+     def cutoff(self):
+         """Cutoff value.
+
+         Items with count below this value are not considered part of vocabulary.
+
+         """
+         return self._cutoff
+
+     def update(self, *counter_args, **counter_kwargs):
+         """Update vocabulary counts.
+
+         Wraps `collections.Counter.update` method.
+
+         """
+         self.counts.update(*counter_args, **counter_kwargs)
+         self._len = sum(1 for _ in self)
+
+     def lookup(self, words):
+         """Look up one or more words in the vocabulary.
+
+         If passed one word as a string will return that word or `self.unk_label`.
+         Otherwise will assume it was passed a sequence of words, will try to look
+         each of them up and return a tuple of the looked up words.
+
+         :param words: Word(s) to look up.
+         :type words: Iterable(str) or str
+         :rtype: tuple(str) or str
+         :raises: TypeError for types other than strings or iterables
+
+         >>> from nltk.lm import Vocabulary
+         >>> vocab = Vocabulary(["a", "b", "c", "a", "b"], unk_cutoff=2)
+         >>> vocab.lookup("a")
+         'a'
+         >>> vocab.lookup("aliens")
+         '<UNK>'
+         >>> vocab.lookup(["a", "b", "c", ["x", "b"]])
+         ('a', 'b', '<UNK>', ('<UNK>', 'b'))
+
+         """
+         return _dispatched_lookup(words, self)
+
+     def __getitem__(self, item):
+         return self._cutoff if item == self.unk_label else self.counts[item]
+
+     def __contains__(self, item):
+         """Only consider items with counts GE to cutoff as being in the
+         vocabulary."""
+         return self[item] >= self.cutoff
+
+     def __iter__(self):
+         """Building on membership check define how to iterate over
+         vocabulary."""
+         return chain(
+             (item for item in self.counts if item in self),
+             [self.unk_label] if self.counts else [],
+         )
+
+     def __len__(self):
+         """Computing size of vocabulary reflects the cutoff."""
+         return self._len
+
+     def __eq__(self, other):
+         return (
+             self.unk_label == other.unk_label
+             and self.cutoff == other.cutoff
+             and self.counts == other.counts
+         )
+
+     def __str__(self):
+         return "<{} with cutoff={} unk_label='{}' and {} items>".format(
+             self.__class__.__name__, self.cutoff, self.unk_label, len(self)
+         )
env-llmeval/lib/python3.10/site-packages/nltk/probability.py ADDED
@@ -0,0 +1,2578 @@
+ # Natural Language Toolkit: Probability and Statistics
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Edward Loper <[email protected]>
+ #         Steven Bird <[email protected]> (additions)
+ #         Trevor Cohn <[email protected]> (additions)
+ #         Peter Ljunglöf <[email protected]> (additions)
+ #         Liang Dong <[email protected]> (additions)
+ #         Geoffrey Sampson <[email protected]> (additions)
+ #         Ilia Kurenkov <[email protected]> (additions)
+ #
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+
+ """
+ Classes for representing and processing probabilistic information.
+
+ The ``FreqDist`` class is used to encode "frequency distributions",
+ which count the number of times that each outcome of an experiment
+ occurs.
+
+ The ``ProbDistI`` class defines a standard interface for "probability
+ distributions", which encode the probability of each outcome for an
+ experiment. There are two types of probability distribution:
+
+ - "derived probability distributions" are created from frequency
+   distributions. They attempt to model the probability distribution
+   that generated the frequency distribution.
+ - "analytic probability distributions" are created directly from
+   parameters (such as variance).
+
+ The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
+ are used to encode conditional distributions. Conditional probability
+ distributions can be derived or analytic; but currently the only
+ implementation of the ``ConditionalProbDistI`` interface is
+ ``ConditionalProbDist``, a derived distribution.
+
+ """
+
+ import array
+ import math
+ import random
+ import warnings
+ from abc import ABCMeta, abstractmethod
+ from collections import Counter, defaultdict
+ from functools import reduce
+
+ from nltk.internals import raise_unorderable_types
+
+ _NINF = float("-1e300")
+
+ ##//////////////////////////////////////////////////////
+ ## Frequency Distributions
+ ##//////////////////////////////////////////////////////
+
+
+ class FreqDist(Counter):
+     """
+     A frequency distribution for the outcomes of an experiment. A
+     frequency distribution records the number of times each outcome of
+     an experiment has occurred. For example, a frequency distribution
+     could be used to record the frequency of each word type in a
+     document. Formally, a frequency distribution can be defined as a
+     function mapping from each sample to the number of times that
+     sample occurred as an outcome.
+
+     Frequency distributions are generally constructed by running a
+     number of experiments, and incrementing the count for a sample
+     every time it is an outcome of an experiment. For example, the
+     following code will produce a frequency distribution that encodes
+     how often each word occurs in a text:
+
+     >>> from nltk.tokenize import word_tokenize
+     >>> from nltk.probability import FreqDist
+     >>> sent = 'This is an example sentence'
+     >>> fdist = FreqDist()
+     >>> for word in word_tokenize(sent):
+     ...     fdist[word.lower()] += 1
+
+     An equivalent way to do this is with the initializer:
+
+     >>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))
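+
+     Both constructions yield the same counts; for example:
+
+     >>> fdist.N()
+     5
+     >>> fdist['is']
+     1
+     >>> fdist.freq('is')
+     0.2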
+
+     """
+
+     def __init__(self, samples=None):
+         """
+         Construct a new frequency distribution. If ``samples`` is
+         given, then the frequency distribution will be initialized
+         with the count of each object in ``samples``; otherwise, it
+         will be initialized to be empty.
+
+         In particular, ``FreqDist()`` returns an empty frequency
+         distribution; and ``FreqDist(samples)`` first creates an empty
+         frequency distribution, and then calls ``update`` with the
+         list ``samples``.
+
+         :param samples: The samples to initialize the frequency
+             distribution with.
+         :type samples: Sequence
+         """
+         Counter.__init__(self, samples)
+
+         # Cached number of samples in this FreqDist
+         self._N = None
+
+     def N(self):
+         """
+         Return the total number of sample outcomes that have been
+         recorded by this FreqDist. For the number of unique
+         sample values (or bins) with counts greater than zero, use
+         ``FreqDist.B()``.
+
+         :rtype: int
+         """
+         if self._N is None:
+             # Not already cached, or cache has been invalidated
+             self._N = sum(self.values())
+         return self._N
+
+     def __setitem__(self, key, val):
+         """
+         Override ``Counter.__setitem__()`` to invalidate the cached N
+         """
+         self._N = None
+         super().__setitem__(key, val)
+
+     def __delitem__(self, key):
+         """
+         Override ``Counter.__delitem__()`` to invalidate the cached N
+         """
+         self._N = None
+         super().__delitem__(key)
+
+     def update(self, *args, **kwargs):
+         """
+         Override ``Counter.update()`` to invalidate the cached N
+         """
+         self._N = None
+         super().update(*args, **kwargs)
+
+     def setdefault(self, key, val):
+         """
+         Override ``Counter.setdefault()`` to invalidate the cached N
+         """
+         self._N = None
+         super().setdefault(key, val)
+
+     def B(self):
+         """
+         Return the total number of sample values (or "bins") that
+         have counts greater than zero. For the total
+         number of sample outcomes recorded, use ``FreqDist.N()``.
+         (FreqDist.B() is the same as len(FreqDist).)
+
+         :rtype: int
+         """
+         return len(self)
+
+     def hapaxes(self):
+         """
+         Return a list of all samples that occur once (hapax legomena)
+
+         :rtype: list
+         """
+         return [item for item in self if self[item] == 1]
+
+     def Nr(self, r, bins=None):
+         return self.r_Nr(bins)[r]
+
+     def r_Nr(self, bins=None):
+         """
+         Return the dictionary mapping r to Nr, the number of samples with frequency r, where Nr > 0.
+
+         :type bins: int
+         :param bins: The number of possible sample outcomes. ``bins``
+             is used to calculate Nr(0). In particular, Nr(0) is
+             ``bins-self.B()``. If ``bins`` is not specified, it
+             defaults to ``self.B()`` (so Nr(0) will be 0).
+         :rtype: dict(int, int)
+         """
+
+         _r_Nr = defaultdict(int)
+         for count in self.values():
+             _r_Nr[count] += 1
+
+         # Special case for Nr[0]:
+         _r_Nr[0] = bins - self.B() if bins is not None else 0
+
+         return _r_Nr
+
+     def _cumulative_frequencies(self, samples):
+         """
+         Return the cumulative frequencies of the specified samples.
+         If no samples are specified, all counts are returned, starting
+         with the largest.
+
+         :param samples: the samples whose frequencies should be returned.
+         :type samples: any
+         :rtype: generator(float)
+         """
+         cf = 0.0
+         for sample in samples:
+             cf += self[sample]
+             yield cf
+
+     # slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs,
+     # here, freq() does probs
+     def freq(self, sample):
+         """
+         Return the frequency of a given sample. The frequency of a
+         sample is defined as the count of that sample divided by the
+         total number of sample outcomes that have been recorded by
+         this FreqDist. The count of a sample is defined as the
+         number of times that sample outcome was recorded by this
+         FreqDist. Frequencies are always real numbers in the range
+         [0, 1].
+
+         :param sample: the sample whose frequency
+             should be returned.
+         :type sample: any
+         :rtype: float
+         """
+         n = self.N()
+         if n == 0:
+             return 0
+         return self[sample] / n
+
+     def max(self):
+         """
+         Return the sample with the greatest number of outcomes in this
+         frequency distribution. If two or more samples have the same
+         number of outcomes, return one of them; which sample is
+         returned is undefined. If no outcomes have occurred in this
+         frequency distribution, a ValueError is raised.
+
+         :return: The sample with the maximum number of outcomes in this
+             frequency distribution.
+         :rtype: any
+         """
+         if len(self) == 0:
+             raise ValueError(
+                 "A FreqDist must have at least one sample before max is defined."
+             )
+         return self.most_common(1)[0][0]
+
+     def plot(
+         self, *args, title="", cumulative=False, percents=False, show=True, **kwargs
+     ):
+         """
+         Plot samples from the frequency distribution
+         displaying the most frequent sample first. If an integer
+         parameter is supplied, stop after this many samples have been
+         plotted. For a cumulative plot, specify cumulative=True. Additional
+         ``**kwargs`` are passed to matplotlib's plot function.
+         (Requires Matplotlib to be installed.)
+
+         :param title: The title for the graph.
+         :type title: str
+         :param cumulative: Whether the plot is cumulative. (default = False)
+         :type cumulative: bool
+         :param percents: Whether the plot uses percents instead of counts. (default = False)
+         :type percents: bool
+         :param show: Whether to show the plot, or only return the ax.
+         :type show: bool
+         """
+         try:
+             import matplotlib.pyplot as plt
+         except ImportError as e:
+             raise ValueError(
+                 "The plot function requires matplotlib to be installed. "
+                 "See https://matplotlib.org/"
+             ) from e
+
+         if len(args) == 0:
+             args = [len(self)]
+         samples = [item for item, _ in self.most_common(*args)]
+
+         if cumulative:
+             freqs = list(self._cumulative_frequencies(samples))
+             ylabel = "Cumulative "
+         else:
+             freqs = [self[sample] for sample in samples]
+             ylabel = ""
+
+         if percents:
+             freqs = [f / self.N() * 100 for f in freqs]
+             ylabel += "Percents"
+         else:
+             ylabel += "Counts"
+
+         ax = plt.gca()
+         ax.grid(True, color="silver")
+
+         if "linewidth" not in kwargs:
+             kwargs["linewidth"] = 2
+         if title:
+             ax.set_title(title)
+
+         ax.plot(freqs, **kwargs)
+         ax.set_xticks(range(len(samples)))
+         ax.set_xticklabels([str(s) for s in samples], rotation=90)
+         ax.set_xlabel("Samples")
+         ax.set_ylabel(ylabel)
+
+         if show:
+             plt.show()
+
+         return ax
+
+     def tabulate(self, *args, **kwargs):
+         """
+         Tabulate the given samples from the frequency distribution (cumulative),
+         displaying the most frequent sample first. If an integer
+         parameter is supplied, stop after this many samples have been
+         tabulated.
+
+         :param samples: The samples to tabulate (default is all samples)
+         :type samples: list
+         :param cumulative: A flag to specify whether the freqs are cumulative (default = False)
+         :type cumulative: bool
+         """
+         if len(args) == 0:
+             args = [len(self)]
+         samples = _get_kwarg(
+             kwargs, "samples", [item for item, _ in self.most_common(*args)]
+         )
+
+         cumulative = _get_kwarg(kwargs, "cumulative", False)
+         if cumulative:
+             freqs = list(self._cumulative_frequencies(samples))
+         else:
+             freqs = [self[sample] for sample in samples]
+         # percents = [f * 100 for f in freqs] only in ProbDist?
+
+         width = max(len(f"{s}") for s in samples)
+         width = max(width, max(len("%d" % f) for f in freqs))
+
+         for i in range(len(samples)):
+             print("%*s" % (width, samples[i]), end=" ")
+         print()
+         for i in range(len(samples)):
+             print("%*d" % (width, freqs[i]), end=" ")
+         print()
+
+     def copy(self):
+         """
+         Create a copy of this frequency distribution.
+
+         :rtype: FreqDist
+         """
+         return self.__class__(self)
+
+     # Mathematical operators
+
+     def __add__(self, other):
+         """
+         Add counts from two counters.
+
+         >>> FreqDist('abbb') + FreqDist('bcc')
+         FreqDist({'b': 4, 'c': 2, 'a': 1})
+
+         """
+         return self.__class__(super().__add__(other))
+
+     def __sub__(self, other):
+         """
+         Subtract count, but keep only results with positive counts.
+
+         >>> FreqDist('abbbc') - FreqDist('bccd')
+         FreqDist({'b': 2, 'a': 1})
+
+         """
+         return self.__class__(super().__sub__(other))
+
+     def __or__(self, other):
+         """
+         Union is the maximum of value in either of the input counters.
+
+         >>> FreqDist('abbb') | FreqDist('bcc')
+         FreqDist({'b': 3, 'c': 2, 'a': 1})
+
+         """
+         return self.__class__(super().__or__(other))
+
+     def __and__(self, other):
+         """
+         Intersection is the minimum of corresponding counts.
+
+         >>> FreqDist('abbb') & FreqDist('bcc')
+         FreqDist({'b': 1})
+
+         """
+         return self.__class__(super().__and__(other))
+
+     def __le__(self, other):
+         """
+         Returns True if this frequency distribution is a subset of the other
+         and for no key the value exceeds the value of the same key from
+         the other frequency distribution.
+
+         The <= operator forms a partial order, satisfying the axioms of
+         reflexivity, antisymmetry and transitivity.
+
+         >>> FreqDist('a') <= FreqDist('a')
+         True
+         >>> a = FreqDist('abc')
+         >>> b = FreqDist('aabc')
+         >>> (a <= b, b <= a)
+         (True, False)
+         >>> FreqDist('a') <= FreqDist('abcd')
+         True
+         >>> FreqDist('abc') <= FreqDist('xyz')
+         False
+         >>> FreqDist('xyz') <= FreqDist('abc')
+         False
+         >>> c = FreqDist('a')
+         >>> d = FreqDist('aa')
+         >>> e = FreqDist('aaa')
+         >>> c <= d and d <= e and c <= e
+         True
+         """
+         if not isinstance(other, FreqDist):
+             raise_unorderable_types("<=", self, other)
+         return set(self).issubset(other) and all(
+             self[key] <= other[key] for key in self
+         )
+
+     def __ge__(self, other):
+         if not isinstance(other, FreqDist):
+             raise_unorderable_types(">=", self, other)
+         return set(self).issuperset(other) and all(
+             self[key] >= other[key] for key in other
+         )
+
+     __lt__ = lambda self, other: self <= other and not self == other
+     __gt__ = lambda self, other: self >= other and not self == other
+
+     def __repr__(self):
+         """
+         Return a string representation of this FreqDist.
+
+         :rtype: string
+         """
+         return self.pformat()
+
+     def pprint(self, maxlen=10, stream=None):
+         """
+         Print a string representation of this FreqDist to 'stream'
+
+         :param maxlen: The maximum number of items to print
+         :type maxlen: int
+         :param stream: The stream to print to. stdout by default
+         """
+         print(self.pformat(maxlen=maxlen), file=stream)
+
+     def pformat(self, maxlen=10):
+         """
+         Return a string representation of this FreqDist.
+
+         :param maxlen: The maximum number of items to display
+         :type maxlen: int
+         :rtype: string
+         """
+         items = ["{!r}: {!r}".format(*item) for item in self.most_common(maxlen)]
+         if len(self) > maxlen:
+             items.append("...")
+         return "FreqDist({{{0}}})".format(", ".join(items))
+
+     def __str__(self):
+         """
+         Return a string representation of this FreqDist.
+
+         :rtype: string
+         """
+         return "<FreqDist with %d samples and %d outcomes>" % (len(self), self.N())
+
+     def __iter__(self):
+         """
+         Return an iterator which yields tokens ordered by frequency.
+
+         :rtype: iterator
+         """
+         for token, _ in self.most_common(self.B()):
+             yield token
+
+
+ ##//////////////////////////////////////////////////////
+ ## Probability Distributions
+ ##//////////////////////////////////////////////////////
+
+
+ class ProbDistI(metaclass=ABCMeta):
+     """
+     A probability distribution for the outcomes of an experiment. A
+     probability distribution specifies how likely it is that an
+     experiment will have any given outcome. For example, a
+     probability distribution could be used to predict the probability
+     that a token in a document will have a given type. Formally, a
+     probability distribution can be defined as a function mapping from
+     samples to nonnegative real numbers, such that the sum of every
+     number in the function's range is 1.0. A ``ProbDist`` is often
+     used to model the probability distribution of the experiment used
+     to generate a frequency distribution.
+     """
+
+     SUM_TO_ONE = True
+     """True if the probabilities of the samples in this probability
+     distribution will always sum to one."""
+
+     @abstractmethod
+     def __init__(self):
+         """
+         Classes inheriting from ProbDistI should implement __init__.
+         """
+
+     @abstractmethod
+     def prob(self, sample):
+         """
+         Return the probability for a given sample. Probabilities
+         are always real numbers in the range [0, 1].
+
+         :param sample: The sample whose probability
+             should be returned.
+         :type sample: any
+         :rtype: float
+         """
+
+     def logprob(self, sample):
+         """
+         Return the base 2 logarithm of the probability for a given sample.
+
+         :param sample: The sample whose probability
+             should be returned.
+         :type sample: any
+         :rtype: float
+         """
+         # Default definition, in terms of prob()
+         p = self.prob(sample)
+         return math.log(p, 2) if p != 0 else _NINF
+
+     @abstractmethod
+     def max(self):
+         """
+         Return the sample with the greatest probability. If two or
+         more samples have the same probability, return one of them;
+         which sample is returned is undefined.
+
+         :rtype: any
+         """
+
+     @abstractmethod
+     def samples(self):
+         """
+         Return a list of all samples that have nonzero probabilities.
+         Use ``prob`` to find the probability of each sample.
+
+         :rtype: list
+         """
+
+     # cf self.SUM_TO_ONE
+     def discount(self):
+         """
+         Return the ratio by which counts are discounted on average: c*/c
+
+         :rtype: float
+         """
+         return 0.0
+
+     # Subclasses should define more efficient implementations of this,
+     # where possible.
+     def generate(self):
+         """
+         Return a randomly selected sample from this probability distribution.
+         The probability of returning each sample ``samp`` is equal to
+         ``self.prob(samp)``.
+         """
+         p = random.random()
+         p_init = p
+         for sample in self.samples():
+             p -= self.prob(sample)
+             if p <= 0:
+                 return sample
+         # allow for some rounding error:
+         if p < 0.0001:
+             return sample
+         # we *should* never get here
+         if self.SUM_TO_ONE:
+             warnings.warn(
+                 "Probability distribution %r sums to %r; generate()"
+                 " is returning an arbitrary sample." % (self, p_init - p)
+             )
+         return random.choice(list(self.samples()))
+
+
+ class UniformProbDist(ProbDistI):
+     """
+     A probability distribution that assigns equal probability to each
+     sample in a given set; and a zero probability to all other
+     samples.
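+
+     For example (probability mass is spread over the *set* of samples):
+
+     >>> ud = UniformProbDist(['a', 'b', 'c', 'c'])
+     >>> ud.prob('a')
+     0.3333333333333333
+     >>> ud.prob('d')
+     0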
601
+ """
602
+
603
+ def __init__(self, samples):
604
+ """
605
+ Construct a new uniform probability distribution, that assigns
606
+ equal probability to each sample in ``samples``.
607
+
608
+ :param samples: The samples that should be given uniform
609
+ probability.
610
+ :type samples: list
611
+ :raise ValueError: If ``samples`` is empty.
612
+ """
613
+ if len(samples) == 0:
614
+ raise ValueError(
615
+ "A Uniform probability distribution must " + "have at least one sample."
616
+ )
617
+ self._sampleset = set(samples)
618
+ self._prob = 1.0 / len(self._sampleset)
619
+ self._samples = list(self._sampleset)
620
+
621
+ def prob(self, sample):
622
+ return self._prob if sample in self._sampleset else 0
623
+
624
+ def max(self):
625
+ return self._samples[0]
626
+
627
+ def samples(self):
628
+ return self._samples
629
+
630
+ def __repr__(self):
631
+ return "<UniformProbDist with %d samples>" % len(self._sampleset)
632
+
633
+
634
+ class RandomProbDist(ProbDistI):
635
+ """
636
+ Generates a random probability distribution whereby each sample
637
+ will be between 0 and 1 with equal probability (uniform random distribution.
638
+ Also called a continuous uniform distribution).
639
+ """
640
+
641
+ def __init__(self, samples):
642
+ if len(samples) == 0:
643
+ raise ValueError(
644
+ "A probability distribution must " + "have at least one sample."
645
+ )
646
+ self._probs = self.unirand(samples)
647
+ self._samples = list(self._probs.keys())
648
+
649
+ @classmethod
650
+ def unirand(cls, samples):
651
+ """
652
+ The key function that creates a randomized initial distribution
653
+ that still sums to 1. Set as a dictionary of prob values so that
654
+ it can still be passed to MutableProbDist and called with identical
655
+ syntax to UniformProbDist.
656
+ """
657
+ samples = set(samples)
658
+ randrow = [random.random() for i in range(len(samples))]
659
+ total = sum(randrow)
660
+ for i, x in enumerate(randrow):
661
+ randrow[i] = x / total
662
+
663
+ total = sum(randrow)
664
+ if total != 1:
665
+ # this difference, if present, is tiny (on the order of machine epsilon)
666
+ # and can be subtracted from any element without pushing probs outside (0, 1)
667
+ randrow[-1] -= total - 1
668
+
669
+ return {s: randrow[i] for i, s in enumerate(samples)}
670
+
671
+ def max(self):
672
+ if not hasattr(self, "_max"):
673
+ self._max = max((p, v) for (v, p) in self._probs.items())[1]
674
+ return self._max
675
+
676
+ def prob(self, sample):
677
+ return self._probs.get(sample, 0)
678
+
679
+ def samples(self):
680
+ return self._samples
681
+
682
+ def __repr__(self):
683
+ return "<RandomUniformProbDist with %d samples>" % len(self._probs)
684
+
685
+
686
+ class DictionaryProbDist(ProbDistI):
687
+ """
688
+ A probability distribution whose probabilities are directly
689
+ specified by a given dictionary. The given dictionary maps
690
+ samples to probabilities.
691
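+
+ For example (hypothetical weights, normalized to sum to one):
+
+ >>> from nltk.probability import DictionaryProbDist
+ >>> pd = DictionaryProbDist({'win': 2, 'lose': 1, 'draw': 1}, normalize=True)
+ >>> pd.prob('win')
+ 0.5
+ >>> pd.prob('tie')
+ 0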
+ """
692
+
693
+ def __init__(self, prob_dict=None, log=False, normalize=False):
694
+ """
695
+ Construct a new probability distribution from the given
696
+ dictionary, which maps values to probabilities (or to log
697
+ probabilities, if ``log`` is true). If ``normalize`` is
698
+ true, then the probability values are scaled by a constant
699
+ factor such that they sum to 1.
700
+
701
+ If called without arguments, the resulting probability
702
+ distribution assigns zero probability to all values.
703
+ """
704
+
705
+ self._prob_dict = prob_dict.copy() if prob_dict is not None else {}
706
+ self._log = log
707
+
708
+ # Normalize the distribution, if requested.
709
+ if normalize:
710
+ if len(prob_dict) == 0:
711
+ raise ValueError(
712
+ "A DictionaryProbDist must have at least one sample "
713
+ + "before it can be normalized."
714
+ )
715
+ if log:
716
+ value_sum = sum_logs(list(self._prob_dict.values()))
717
+ if value_sum <= _NINF:
718
+ logp = math.log(1.0 / len(prob_dict), 2)
719
+ for x in prob_dict:
720
+ self._prob_dict[x] = logp
721
+ else:
722
+ for (x, p) in self._prob_dict.items():
723
+ self._prob_dict[x] -= value_sum
724
+ else:
725
+ value_sum = sum(self._prob_dict.values())
726
+ if value_sum == 0:
727
+ p = 1.0 / len(prob_dict)
728
+ for x in prob_dict:
729
+ self._prob_dict[x] = p
730
+ else:
731
+ norm_factor = 1.0 / value_sum
732
+ for (x, p) in self._prob_dict.items():
733
+ self._prob_dict[x] *= norm_factor
734
+
735
+ def prob(self, sample):
736
+ if self._log:
737
+ return 2 ** (self._prob_dict[sample]) if sample in self._prob_dict else 0
738
+ else:
739
+ return self._prob_dict.get(sample, 0)
740
+
741
+ def logprob(self, sample):
742
+ if self._log:
743
+ return self._prob_dict.get(sample, _NINF)
744
+ else:
745
+ if sample not in self._prob_dict:
746
+ return _NINF
747
+ elif self._prob_dict[sample] == 0:
748
+ return _NINF
749
+ else:
750
+ return math.log(self._prob_dict[sample], 2)
751
+
752
+ def max(self):
753
+ if not hasattr(self, "_max"):
754
+ self._max = max((p, v) for (v, p) in self._prob_dict.items())[1]
755
+ return self._max
756
+
757
+ def samples(self):
758
+ return self._prob_dict.keys()
759
+
760
+ def __repr__(self):
761
+ return "<ProbDist with %d samples>" % len(self._prob_dict)
762
+
763
+
764
+ class MLEProbDist(ProbDistI):
765
+ """
766
+ The maximum likelihood estimate for the probability distribution
767
+ of the experiment used to generate a frequency distribution. The
768
+ "maximum likelihood estimate" approximates the probability of
769
+ each sample as the frequency of that sample in the frequency
770
+ distribution.
771
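+
+ A small worked example (hypothetical counts):
+
+ >>> from nltk.probability import FreqDist, MLEProbDist
+ >>> mle = MLEProbDist(FreqDist('aab'))
+ >>> mle.prob('a') == 2 / 3
+ True
+ >>> mle.max()
+ 'a'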
+ """
772
+
773
+ def __init__(self, freqdist, bins=None):
774
+ """
775
+ Use the maximum likelihood estimate to create a probability
776
+ distribution for the experiment used to generate ``freqdist``.
777
+
778
+ :type freqdist: FreqDist
779
+ :param freqdist: The frequency distribution that the
780
+ probability estimates should be based on.
781
+ """
782
+ self._freqdist = freqdist
783
+
784
+ def freqdist(self):
785
+ """
786
+ Return the frequency distribution that this probability
787
+ distribution is based on.
788
+
789
+ :rtype: FreqDist
790
+ """
791
+ return self._freqdist
792
+
793
+ def prob(self, sample):
794
+ return self._freqdist.freq(sample)
795
+
796
+ def max(self):
797
+ return self._freqdist.max()
798
+
799
+ def samples(self):
800
+ return self._freqdist.keys()
801
+
802
+ def __repr__(self):
803
+ """
804
+ :rtype: str
805
+ :return: A string representation of this ``ProbDist``.
806
+ """
807
+ return "<MLEProbDist based on %d samples>" % self._freqdist.N()
808
+
809
+
810
+ class LidstoneProbDist(ProbDistI):
811
+ """
812
+ The Lidstone estimate for the probability distribution of the
813
+ experiment used to generate a frequency distribution. The
814
+ "Lidstone estimate" is parameterized by a real number *gamma*,
815
+ which typically ranges from 0 to 1. The Lidstone estimate
816
+ approximates the probability of a sample with count *c* from an
817
+ experiment with *N* outcomes and *B* bins as
818
+ ``(c+gamma)/(N+B*gamma)``. This is equivalent to adding
819
+ *gamma* to the count for each bin, and taking the maximum
820
+ likelihood estimate of the resulting frequency distribution.
821
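+
+ A worked example (hypothetical counts, gamma = 0.2, B = 2 bins):
+
+ >>> from nltk.probability import FreqDist, LidstoneProbDist
+ >>> lid = LidstoneProbDist(FreqDist('aab'), 0.2)
+ >>> round(lid.prob('a'), 4)  # (2 + 0.2) / (3 + 2 * 0.2)
+ 0.6471
+ >>> round(lid.prob('b'), 4)  # (1 + 0.2) / (3 + 2 * 0.2)
+ 0.3529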
+ """
822
+
823
+ SUM_TO_ONE = False
824
+
825
+ def __init__(self, freqdist, gamma, bins=None):
826
+ """
827
+ Use the Lidstone estimate to create a probability distribution
828
+ for the experiment used to generate ``freqdist``.
829
+
830
+ :type freqdist: FreqDist
831
+ :param freqdist: The frequency distribution that the
832
+ probability estimates should be based on.
833
+ :type gamma: float
834
+ :param gamma: A real number used to parameterize the
835
+ estimate. The Lidstone estimate is equivalent to adding
836
+ *gamma* to the count for each bin, and taking the
837
+ maximum likelihood estimate of the resulting frequency
838
+ distribution.
839
+ :type bins: int
840
+ :param bins: The number of sample values that can be generated
841
+ by the experiment that is described by the probability
842
+ distribution. This value must be correctly set for the
843
+ probabilities of the sample values to sum to one. If
844
+ ``bins`` is not specified, it defaults to ``freqdist.B()``.
845
+ """
846
+ if (bins == 0) or (bins is None and freqdist.N() == 0):
847
+ name = self.__class__.__name__[:-8]
848
+ raise ValueError(
849
+ "A %s probability distribution " % name + "must have at least one bin."
850
+ )
851
+ if (bins is not None) and (bins < freqdist.B()):
852
+ name = self.__class__.__name__[:-8]
853
+ raise ValueError(
854
+ "\nThe number of bins in a %s distribution " % name
855
+ + "(%d) must be greater than or equal to\n" % bins
856
+ + "the number of bins in the FreqDist used "
857
+ + "to create it (%d)." % freqdist.B()
858
+ )
859
+
860
+ self._freqdist = freqdist
861
+ self._gamma = float(gamma)
862
+ self._N = self._freqdist.N()
863
+
864
+ if bins is None:
865
+ bins = freqdist.B()
866
+ self._bins = bins
867
+
868
+ self._divisor = self._N + bins * gamma
869
+ if self._divisor == 0.0:
870
+ # In extreme cases we force the probability to be 0,
871
+ # which it will be, since the count will be 0:
872
+ self._gamma = 0
873
+ self._divisor = 1
874
+
875
+ def freqdist(self):
876
+ """
877
+ Return the frequency distribution that this probability
878
+ distribution is based on.
879
+
880
+ :rtype: FreqDist
881
+ """
882
+ return self._freqdist
883
+
884
+ def prob(self, sample):
885
+ c = self._freqdist[sample]
886
+ return (c + self._gamma) / self._divisor
887
+
888
+ def max(self):
889
+ # For Lidstone distributions, probability is monotonic with
890
+ # frequency, so the most probable sample is the one that
891
+ # occurs most frequently.
892
+ return self._freqdist.max()
893
+
894
+ def samples(self):
895
+ return self._freqdist.keys()
896
+
897
+ def discount(self):
898
+ gb = self._gamma * self._bins
899
+ return gb / (self._N + gb)
900
+
901
+ def __repr__(self):
902
+ """
903
+ Return a string representation of this ``ProbDist``.
904
+
905
+ :rtype: str
906
+ """
907
+ return "<LidstoneProbDist based on %d samples>" % self._freqdist.N()
908
+
909
+
910
+ class LaplaceProbDist(LidstoneProbDist):
911
+ """
912
+ The Laplace estimate for the probability distribution of the
913
+ experiment used to generate a frequency distribution. The
914
+ "Laplace estimate" approximates the probability of a sample with
915
+ count *c* from an experiment with *N* outcomes and *B* bins as
916
+ *(c+1)/(N+B)*. This is equivalent to adding one to the count for
917
+ each bin, and taking the maximum likelihood estimate of the
918
+ resulting frequency distribution.
919
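+
+ For instance (hypothetical counts, N = 3 outcomes, B = 2 bins):
+
+ >>> from nltk.probability import FreqDist, LaplaceProbDist
+ >>> lap = LaplaceProbDist(FreqDist('aab'))
+ >>> lap.prob('a')  # (2 + 1) / (3 + 2)
+ 0.6
+ >>> lap.prob('b')  # (1 + 1) / (3 + 2)
+ 0.4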
+ """
920
+
921
+ def __init__(self, freqdist, bins=None):
922
+ """
923
+ Use the Laplace estimate to create a probability distribution
924
+ for the experiment used to generate ``freqdist``.
925
+
926
+ :type freqdist: FreqDist
927
+ :param freqdist: The frequency distribution that the
928
+ probability estimates should be based on.
929
+ :type bins: int
930
+ :param bins: The number of sample values that can be generated
931
+ by the experiment that is described by the probability
932
+ distribution. This value must be correctly set for the
933
+ probabilities of the sample values to sum to one. If
934
+ ``bins`` is not specified, it defaults to ``freqdist.B()``.
935
+ """
936
+ LidstoneProbDist.__init__(self, freqdist, 1, bins)
937
+
938
+ def __repr__(self):
939
+ """
940
+ :rtype: str
941
+ :return: A string representation of this ``ProbDist``.
942
+ """
943
+ return "<LaplaceProbDist based on %d samples>" % self._freqdist.N()
944
+
945
+
946
+ class ELEProbDist(LidstoneProbDist):
947
+ """
948
+ The expected likelihood estimate for the probability distribution
949
+ of the experiment used to generate a frequency distribution. The
950
+ "expected likelihood estimate" approximates the probability of a
951
+ sample with count *c* from an experiment with *N* outcomes and
952
+ *B* bins as *(c+0.5)/(N+B/2)*. This is equivalent to adding 0.5
953
+ to the count for each bin, and taking the maximum likelihood
954
+ estimate of the resulting frequency distribution.
955
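+
+ For instance (hypothetical counts, N = 3 outcomes, B = 2 bins):
+
+ >>> from nltk.probability import FreqDist, ELEProbDist
+ >>> ele = ELEProbDist(FreqDist('aab'))
+ >>> ele.prob('a')  # (2 + 0.5) / (3 + 2 / 2)
+ 0.625
+ >>> ele.prob('b')  # (1 + 0.5) / (3 + 2 / 2)
+ 0.375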
+ """
956
+
957
+ def __init__(self, freqdist, bins=None):
958
+ """
959
+ Use the expected likelihood estimate to create a probability
960
+ distribution for the experiment used to generate ``freqdist``.
961
+
962
+ :type freqdist: FreqDist
963
+ :param freqdist: The frequency distribution that the
964
+ probability estimates should be based on.
965
+ :type bins: int
966
+ :param bins: The number of sample values that can be generated
967
+ by the experiment that is described by the probability
968
+ distribution. This value must be correctly set for the
969
+ probabilities of the sample values to sum to one. If
970
+ ``bins`` is not specified, it defaults to ``freqdist.B()``.
971
+ """
972
+ LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
973
+
974
+ def __repr__(self):
975
+ """
976
+ Return a string representation of this ``ProbDist``.
977
+
978
+ :rtype: str
979
+ """
980
+ return "<ELEProbDist based on %d samples>" % self._freqdist.N()
981
+
982
+
983
+ class HeldoutProbDist(ProbDistI):
984
+ """
985
+ The heldout estimate for the probability distribution of the
986
+ experiment used to generate two frequency distributions. These
987
+ two frequency distributions are called the "heldout frequency
988
+ distribution" and the "base frequency distribution." The
989
+ "heldout estimate" uses uses the "heldout frequency
990
+ distribution" to predict the probability of each sample, given its
991
+ frequency in the "base frequency distribution".
992
+
993
+ In particular, the heldout estimate approximates the probability
994
+ for a sample that occurs *r* times in the base distribution as
995
+ the average frequency in the heldout distribution of all samples
996
+ that occur *r* times in the base distribution.
997
+
998
+ This average frequency is *Tr[r]/(Nr[r].N)*, where:
999
+
1000
+ - *Tr[r]* is the total count in the heldout distribution for
1001
+ all samples that occur *r* times in the base distribution.
1002
+ - *Nr[r]* is the number of samples that occur *r* times in
1003
+ the base distribution.
1004
+ - *N* is the number of outcomes recorded by the heldout
1005
+ frequency distribution.
1006
+
1007
+ In order to increase the efficiency of the ``prob`` member
1008
+ function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r*
1009
+ when the ``HeldoutProbDist`` is created.
1010
+
1011
+ :type _estimate: list(float)
1012
+ :ivar _estimate: A list mapping from *r*, the number of
1013
+ times that a sample occurs in the base distribution, to the
1014
+ probability estimate for that sample. ``_estimate[r]`` is
1015
+ calculated by finding the average frequency in the heldout
1016
+ distribution of all samples that occur *r* times in the base
1017
+ distribution. In particular, ``_estimate[r]`` =
1018
+ *Tr[r]/(Nr[r].N)*.
1019
+ :type _max_r: int
1020
+ :ivar _max_r: The maximum number of times that any sample occurs
1021
+ in the base distribution. ``_max_r`` is used to decide how
1022
+ large ``_estimate`` must be.
1023
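+
+ A tiny illustration (hypothetical base and heldout counts):
+
+ >>> from nltk.probability import FreqDist, HeldoutProbDist
+ >>> hpd = HeldoutProbDist(FreqDist('aab'), FreqDist('abb'))
+ >>> round(hpd.prob('b'), 4)  # r=1: Tr[1]/(Nr[1]*N) = 2/(1*3)
+ 0.6667
+ >>> round(hpd.prob('a'), 4)  # r=2: Tr[2]/(Nr[2]*N) = 1/(1*3)
+ 0.3333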
+ """
1024
+
1025
+ SUM_TO_ONE = False
1026
+
1027
+ def __init__(self, base_fdist, heldout_fdist, bins=None):
1028
+ """
1029
+ Use the heldout estimate to create a probability distribution
1030
+ for the experiment used to generate ``base_fdist`` and
1031
+ ``heldout_fdist``.
1032
+
1033
+ :type base_fdist: FreqDist
1034
+ :param base_fdist: The base frequency distribution.
1035
+ :type heldout_fdist: FreqDist
1036
+ :param heldout_fdist: The heldout frequency distribution.
1037
+ :type bins: int
1038
+ :param bins: The number of sample values that can be generated
1039
+ by the experiment that is described by the probability
1040
+ distribution. This value must be correctly set for the
1041
+ probabilities of the sample values to sum to one. If
1042
+ ``bins`` is not specified, it defaults to ``freqdist.B()``.
1043
+ """
1044
+
1045
+ self._base_fdist = base_fdist
1046
+ self._heldout_fdist = heldout_fdist
1047
+
1048
+ # The max number of times any sample occurs in base_fdist.
1049
+ self._max_r = base_fdist[base_fdist.max()]
1050
+
1051
+ # Calculate Tr, Nr, and N.
1052
+ Tr = self._calculate_Tr()
1053
+ r_Nr = base_fdist.r_Nr(bins)
1054
+ Nr = [r_Nr[r] for r in range(self._max_r + 1)]
1055
+ N = heldout_fdist.N()
1056
+
1057
+ # Use Tr, Nr, and N to compute the probability estimate for
1058
+ # each value of r.
1059
+ self._estimate = self._calculate_estimate(Tr, Nr, N)
1060
+
1061
+ def _calculate_Tr(self):
1062
+ """
1063
+ Return the list *Tr*, where *Tr[r]* is the total count in
1064
+ ``heldout_fdist`` for all samples that occur *r*
1065
+ times in ``base_fdist``.
1066
+
1067
+ :rtype: list(float)
1068
+ """
1069
+ Tr = [0.0] * (self._max_r + 1)
1070
+ for sample in self._heldout_fdist:
1071
+ r = self._base_fdist[sample]
1072
+ Tr[r] += self._heldout_fdist[sample]
1073
+ return Tr
1074
+
1075
+ def _calculate_estimate(self, Tr, Nr, N):
1076
+ """
1077
+ Return the list *estimate*, where *estimate[r]* is the probability
1078
+ estimate for any sample that occurs *r* times in the base frequency
1079
+ distribution. In particular, *estimate[r]* is *Tr[r]/(Nr[r].N)*.
1080
+ In the special case that *Nr[r]=0*, *estimate[r]* will never be used;
1081
+ so we define *estimate[r]=None* for those cases.
1082
+
1083
+ :rtype: list(float)
1084
+ :type Tr: list(float)
1085
+ :param Tr: the list *Tr*, where *Tr[r]* is the total count in
1086
+ the heldout distribution for all samples that occur *r*
1087
+ times in base distribution.
1088
+ :type Nr: list(float)
1089
+ :param Nr: The list *Nr*, where *Nr[r]* is the number of
1090
+ samples that occur *r* times in the base distribution.
1091
+ :type N: int
1092
+ :param N: The total number of outcomes recorded by the heldout
1093
+ frequency distribution.
1094
+ """
1095
+ estimate = []
1096
+ for r in range(self._max_r + 1):
1097
+ if Nr[r] == 0:
1098
+ estimate.append(None)
1099
+ else:
1100
+ estimate.append(Tr[r] / (Nr[r] * N))
1101
+ return estimate
1102
+
1103
+ def base_fdist(self):
1104
+ """
1105
+ Return the base frequency distribution that this probability
1106
+ distribution is based on.
1107
+
1108
+ :rtype: FreqDist
1109
+ """
1110
+ return self._base_fdist
1111
+
1112
+ def heldout_fdist(self):
1113
+ """
1114
+ Return the heldout frequency distribution that this
1115
+ probability distribution is based on.
1116
+
1117
+ :rtype: FreqDist
1118
+ """
1119
+ return self._heldout_fdist
1120
+
1121
+ def samples(self):
1122
+ return self._base_fdist.keys()
1123
+
1124
+ def prob(self, sample):
1125
+ # Use our precomputed probability estimate.
1126
+ r = self._base_fdist[sample]
1127
+ return self._estimate[r]
1128
+
1129
+ def max(self):
1130
+ # Note: the Heldout estimation is *not* necessarily monotonic;
1131
+ # so this implementation is currently broken. However, it
1132
+ # should give the right answer *most* of the time. :)
1133
+ return self._base_fdist.max()
1134
+
1135
+ def discount(self):
1136
+ raise NotImplementedError()
1137
+
1138
+ def __repr__(self):
1139
+ """
1140
+ :rtype: str
1141
+ :return: A string representation of this ``ProbDist``.
1142
+ """
1143
+ s = "<HeldoutProbDist: %d base samples; %d heldout samples>"
1144
+ return s % (self._base_fdist.N(), self._heldout_fdist.N())
1145
+
1146
+
1147
+ class CrossValidationProbDist(ProbDistI):
1148
+ """
1149
+ The cross-validation estimate for the probability distribution of
1150
+ the experiment used to generate a set of frequency distributions.
1151
+ The "cross-validation estimate" for the probability of a sample
1152
+ is found by averaging the held-out estimates for the sample in
1153
+ each pair of frequency distributions.
1154
+ """
1155
+
1156
+ SUM_TO_ONE = False
1157
+
1158
+ def __init__(self, freqdists, bins):
1159
+ """
1160
+ Use the cross-validation estimate to create a probability
1161
+ distribution for the experiment used to generate
1162
+ ``freqdists``.
1163
+
1164
+ :type freqdists: list(FreqDist)
1165
+ :param freqdists: A list of the frequency distributions
1166
+ generated by the experiment.
1167
+ :type bins: int
1168
+ :param bins: The number of sample values that can be generated
1169
+ by the experiment that is described by the probability
1170
+ distribution. This value must be correctly set for the
1171
+ probabilities of the sample values to sum to one. If
1172
+ ``bins`` is not specified, it defaults to ``freqdist.B()``.
1173
+ """
1174
+ self._freqdists = freqdists
1175
+
1176
+ # Create a heldout probability distribution for each pair of
1177
+ # frequency distributions in freqdists.
1178
+ self._heldout_probdists = []
1179
+ for fdist1 in freqdists:
1180
+ for fdist2 in freqdists:
1181
+ if fdist1 is not fdist2:
1182
+ probdist = HeldoutProbDist(fdist1, fdist2, bins)
1183
+ self._heldout_probdists.append(probdist)
1184
+
1185
+ def freqdists(self):
1186
+ """
1187
+ Return the list of frequency distributions that this ``ProbDist`` is based on.
1188
+
1189
+ :rtype: list(FreqDist)
1190
+ """
1191
+ return self._freqdists
1192
+
1193
+ def samples(self):
1194
+ # [xx] nb: this is not too efficient
1195
+ return set(sum((list(fd) for fd in self._freqdists), []))
1196
+
1197
+ def prob(self, sample):
1198
+ # Find the average probability estimate returned by each
1199
+ # heldout distribution.
1200
+ prob = 0.0
1201
+ for heldout_probdist in self._heldout_probdists:
1202
+ prob += heldout_probdist.prob(sample)
1203
+ return prob / len(self._heldout_probdists)
1204
+
1205
+ def discount(self):
1206
+ raise NotImplementedError()
1207
+
1208
+ def __repr__(self):
1209
+ """
1210
+ Return a string representation of this ``ProbDist``.
1211
+
1212
+ :rtype: str
1213
+ """
1214
+ return "<CrossValidationProbDist: %d-way>" % len(self._freqdists)
1215
+
1216
+
1217
+ class WittenBellProbDist(ProbDistI):
1218
+ """
1219
+ The Witten-Bell estimate of a probability distribution. This distribution
1220
+ allocates uniform probability mass to as yet unseen events by using the
1221
+ number of events that have only been seen once. The probability mass
1222
+ reserved for unseen events is equal to *T / (N + T)*
1223
+ where *T* is the number of observed event types and *N* is the total
1224
+ number of observed events. This equates to the maximum likelihood estimate
1225
+ of a new type event occurring. The remaining probability mass is discounted
1226
+ such that all probability estimates sum to one, yielding:
1227
+
1228
+ - *p = T / (Z * (N + T))*, if count = 0
1229
+ - *p = c / (N + T)*, otherwise
1230
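+
+ A worked sketch (hypothetical counts; bins=3 gives Z = 3 - 2 = 1):
+
+ >>> from nltk.probability import FreqDist, WittenBellProbDist
+ >>> wb = WittenBellProbDist(FreqDist('aab'), bins=3)
+ >>> wb.prob('a')  # seen: c / (N + T) = 2 / (3 + 2)
+ 0.4
+ >>> wb.prob('c')  # unseen: T / (Z * (N + T)) = 2 / (1 * 5)
+ 0.4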
+ """
1231
+
1232
+ def __init__(self, freqdist, bins=None):
1233
+ """
1234
+ Creates a distribution of Witten-Bell probability estimates. This
1235
+ distribution allocates uniform probability mass to as yet unseen
1236
+ events by using the number of events that have only been seen once. The
1237
+ probability mass reserved for unseen events is equal to *T / (N + T)*
1238
+ where *T* is the number of observed event types and *N* is the total
1239
+ number of observed events. This equates to the maximum likelihood
1240
+ estimate of a new type event occurring. The remaining probability mass
1241
+ is discounted such that all probability estimates sum to one,
1242
+ yielding:
1243
+
1244
+ - *p = T / (Z * (N + T))*, if count = 0
1245
+ - *p = c / (N + T)*, otherwise
1246
+
1247
+ The parameters *T* and *N* are taken from the ``freqdist`` parameter
1248
+ (the ``B()`` and ``N()`` values). The normalizing factor *Z* is
1249
+ calculated using these values along with the ``bins`` parameter.
1250
+
1251
+ :param freqdist: The frequency counts upon which to base the
1252
+ estimation.
1253
+ :type freqdist: FreqDist
1254
+ :param bins: The number of possible event types. This must be at least
1255
+ as large as the number of bins in the ``freqdist``. If None, then
1256
+ it's assumed to be equal to that of the ``freqdist``
1257
+ :type bins: int
1258
+ """
1259
+ assert bins is None or bins >= freqdist.B(), (
1260
+ "bins parameter must not be less than %d=freqdist.B()" % freqdist.B()
1261
+ )
1262
+ if bins is None:
1263
+ bins = freqdist.B()
1264
+ self._freqdist = freqdist
1265
+ self._T = self._freqdist.B()
1266
+ self._Z = bins - self._freqdist.B()
1267
+ self._N = self._freqdist.N()
1268
+ # self._P0 is P(0), precalculated for efficiency:
1269
+ if self._N == 0:
1270
+ # if freqdist is empty, we approximate P(0) by a UniformProbDist:
1271
+ self._P0 = 1.0 / self._Z
1272
+ else:
1273
+ self._P0 = self._T / (self._Z * (self._N + self._T))
1274
+
1275
+ def prob(self, sample):
1276
+ # inherit docs from ProbDistI
1277
+ c = self._freqdist[sample]
1278
+ return c / (self._N + self._T) if c != 0 else self._P0
1279
+
1280
+ def max(self):
1281
+ return self._freqdist.max()
1282
+
1283
+ def samples(self):
1284
+ return self._freqdist.keys()
1285
+
1286
+ def freqdist(self):
1287
+ return self._freqdist
1288
+
1289
+ def discount(self):
1290
+ raise NotImplementedError()
1291
+
1292
+ def __repr__(self):
1293
+ """
1294
+ Return a string representation of this ``ProbDist``.
1295
+
1296
+ :rtype: str
1297
+ """
1298
+ return "<WittenBellProbDist based on %d samples>" % self._freqdist.N()
1299
+
1300
+
1301
+ ##//////////////////////////////////////////////////////
1302
+ ## Good-Turing Probability Distributions
1303
+ ##//////////////////////////////////////////////////////
1304
+
1305
+ # Good-Turing frequency estimation was contributed by Alan Turing and
1306
+ # his statistical assistant I.J. Good, during their collaboration in
1307
+ # the WWII. It is a statistical technique for predicting the
1308
+ # probability of occurrence of objects belonging to an unknown number
1309
+ # of species, given past observations of such objects and their
1310
+ # species. (In drawing balls from an urn, the 'objects' would be balls
1311
+ # and the 'species' would be the distinct colors of the balls, finite
1312
+ # but unknown in number.)
1313
+ #
1314
+ # The Good-Turing method calculates the probability mass to assign to
1315
+ # events with zero or low counts based on the number of events with
1316
+ # higher counts. It does so by using the adjusted count *c\**:
1317
+ #
1318
+ # - *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
1319
+ # - *things with frequency zero in training* = N(1) for c == 0
1320
+ #
1321
+ # where *c* is the original count, *N(i)* is the number of event types
1322
+ # observed with count *i*. We can treat the count of unseen events as the
1323
+ # count of frequency one (see Jurafsky & Martin 2nd Edition, p101).
1324
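+ #
+ # A quick worked example with made-up counts: if N(1) = 3 and N(2) = 1,
+ # a sample seen once gets the adjusted count
+ # c* = (1 + 1) * N(2) / N(1) = 2 * 1 / 3 ~ 0.67,
+ # while the total probability mass reserved for unseen events is N(1)/N.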
+ #
1325
+ # This method is problematic because the situation ``N(c+1) == 0``
1326
+ # is quite common in the original Good-Turing estimation; smoothing or
1327
+ # interpolation of *N(i)* values is essential in practice.
1328
+ #
1329
+ # Bill Gale and Geoffrey Sampson present a simple and effective approach,
1330
+ # Simple Good-Turing. As a smoothing curve they simply use a power curve:
1331
+ #
1332
+ # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
1333
+ # relationship)
1334
+ #
1335
+ # They estimate a and b by simple linear regression technique on the
1336
+ # logarithmic form of the equation:
1337
+ #
1338
+ # log Nr = a + b*log(r)
1339
+ #
1340
+ # However, they suggest that such a simple curve is probably only
1341
+ # appropriate for high values of r. For low values of r, they use the
1342
+ # measured Nr directly. (see M&S, p.213)
1343
+ #
1344
+ # Gale and Sampson propose to use r while the difference between r and
1345
+ # r* is more than 1.96 times the standard deviation, and to switch to r*
1346
+ # once it is less than or equal to that:
1347
+ #
1348
+ # |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
1349
+ #
1350
+ # The 1.96 coefficient corresponds to a 0.05 significance criterion,
1351
+ # some implementations can use a coefficient of 1.65 for a 0.1
1352
+ # significance criterion.
1353
+ #
1354
+
1355
+ ##//////////////////////////////////////////////////////
1356
+ ## Simple Good-Turing Probability Distributions
1357
+ ##//////////////////////////////////////////////////////
1358
+
1359
+
1360
+ class SimpleGoodTuringProbDist(ProbDistI):
1361
+ """
1362
+ SimpleGoodTuring ProbDist approximates the relation between frequency and
1363
+ frequency of frequency by a straight line in log space, fit by linear regression.
1364
+ Details of Simple Good-Turing algorithm can be found in:
1365
+
1366
+ - "Good Turing smoothing without tears" (Gale & Sampson 1995),
1367
+ Journal of Quantitative Linguistics, vol. 2 pp. 217-237.
1368
+ - "Speech and Language Processing (Jurafsky & Martin),
1369
+ 2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
1370
+ - https://www.grsampson.net/RGoodTur.html
1371
+
1372
+ Given a set of pairs (xi, yi), where xi denotes the frequency and
1373
+ yi denotes the frequency of frequency, we want to minimize their
1374
+ square variation. E(x) and E(y) represent the mean of xi and yi.
1375
+
1376
+ - slope: b = sigma ((xi-E(x))(yi-E(y))) / sigma ((xi-E(x))(xi-E(x)))
1377
+ - intercept: a = E(y) - b*E(x)
1378
+ """
1379
+
1380
+ SUM_TO_ONE = False
1381
+
1382
+ def __init__(self, freqdist, bins=None):
1383
+ """
1384
+ :param freqdist: The frequency counts upon which to base the
1385
+ estimation.
1386
+ :type freqdist: FreqDist
1387
+ :param bins: The number of possible event types. This must be
1388
+ larger than the number of bins in the ``freqdist``. If None,
1389
+ then it's assumed to be equal to ``freqdist.B() + 1``
1390
+ :type bins: int
1391
+ """
1392
+ assert (
1393
+ bins is None or bins > freqdist.B()
1394
+ ), "bins parameter must not be less than %d=freqdist.B()+1" % (freqdist.B() + 1)
1395
+ if bins is None:
1396
+ bins = freqdist.B() + 1
1397
+ self._freqdist = freqdist
1398
+ self._bins = bins
1399
+ r, nr = self._r_Nr()
1400
+ self.find_best_fit(r, nr)
1401
+ self._switch(r, nr)
1402
+ self._renormalize(r, nr)
1403
+
1404
+ def _r_Nr_non_zero(self):
1405
+ r_Nr = self._freqdist.r_Nr()
1406
+ del r_Nr[0]
1407
+ return r_Nr
1408
+
1409
+ def _r_Nr(self):
1410
+ """
1411
+ Split the frequency distribution into two lists (r, Nr), where Nr(r) > 0
1412
+ """
1413
+ nonzero = self._r_Nr_non_zero()
1414
+
1415
+ if not nonzero:
1416
+ return [], []
1417
+ return zip(*sorted(nonzero.items()))
1418
+
1419
+ def find_best_fit(self, r, nr):
1420
+ """
1421
+ Use simple linear regression to tune parameters self._slope and
1422
+ self._intercept in the log-log space based on count and Nr(count)
1423
+ (Work in log space to avoid floating point underflow.)
1424
+ """
1425
+ # For higher sample frequencies the data points become horizontal
1426
+ # along line Nr=1. To create a more evident linear model in log-log
1427
+ # space, we average positive Nr values with the surrounding zero
1428
+ # values. (Church and Gale, 1991)
1429
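+ #
+ # E.g. (made-up values) with r = [1, 2, 5] and nr = [10, 6, 2], the
+ # averaged values come out as zr = [2*10/(2-0), 2*6/(5-1), 2*2/(8-2)]
+ # = [10.0, 3.0, 0.67], where the final window end is k = 2*5 - 2 = 8.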
+
1430
+ if not r or not nr:
1431
+ # Empty r or nr?
1432
+ return
1433
+
1434
+ zr = []
1435
+ for j in range(len(r)):
1436
+ i = r[j - 1] if j > 0 else 0
1437
+ k = 2 * r[j] - i if j == len(r) - 1 else r[j + 1]
1438
+ zr_ = 2.0 * nr[j] / (k - i)
1439
+ zr.append(zr_)
1440
+
1441
+ log_r = [math.log(i) for i in r]
1442
+ log_zr = [math.log(i) for i in zr]
1443
+
1444
+ xy_cov = x_var = 0.0
1445
+ x_mean = sum(log_r) / len(log_r)
1446
+ y_mean = sum(log_zr) / len(log_zr)
1447
+ for (x, y) in zip(log_r, log_zr):
1448
+ xy_cov += (x - x_mean) * (y - y_mean)
1449
+ x_var += (x - x_mean) ** 2
1450
+ self._slope = xy_cov / x_var if x_var != 0 else 0.0
1451
+ if self._slope >= -1:
1452
+ warnings.warn(
1453
+ "SimpleGoodTuring did not find a proper best fit "
1454
+ "line for smoothing probabilities of occurrences. "
1455
+ "The probability estimates are likely to be "
1456
+ "unreliable."
1457
+ )
1458
+ self._intercept = y_mean - self._slope * x_mean
1459
+
1460
+ def _switch(self, r, nr):
1461
+ """
1462
+ Calculate the r frontier where we must switch from Nr to Sr
1463
+ when estimating E[Nr].
1464
+ """
1465
+ for i, r_ in enumerate(r):
1466
+ if len(r) == i + 1 or r[i + 1] != r_ + 1:
1467
+ # We are at the end of r, or there is a gap in r
1468
+ self._switch_at = r_
1469
+ break
1470
+
1471
+ Sr = self.smoothedNr
1472
+ smooth_r_star = (r_ + 1) * Sr(r_ + 1) / Sr(r_)
1473
+ unsmooth_r_star = (r_ + 1) * nr[i + 1] / nr[i]
1474
+
1475
+ std = math.sqrt(self._variance(r_, nr[i], nr[i + 1]))
1476
+ if abs(unsmooth_r_star - smooth_r_star) <= 1.96 * std:
1477
+ self._switch_at = r_
1478
+ break
1479
+
1480
+ def _variance(self, r, nr, nr_1):
1481
+ r = float(r)
1482
+ nr = float(nr)
1483
+ nr_1 = float(nr_1)
1484
+ return (r + 1.0) ** 2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr)
1485
+
1486
+ def _renormalize(self, r, nr):
1487
+ """
1488
+ It is necessary to renormalize all the probability estimates to
1489
+ ensure a proper probability distribution results. This can be done
1490
+ by keeping the estimate of the probability mass for unseen items as
1491
+ N(1)/N and renormalizing all the estimates for previously seen items
1492
+ (as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
1493
+ """
1494
+ prob_cov = 0.0
1495
+ for r_, nr_ in zip(r, nr):
1496
+ prob_cov += nr_ * self._prob_measure(r_)
1497
+ if prob_cov:
1498
+ self._renormal = (1 - self._prob_measure(0)) / prob_cov
1499
+
1500
+ def smoothedNr(self, r):
1501
+ """
1502
+ Return the smoothed estimate of the number of samples with count r.
1503
+
1504
+ :param r: The count whose smoothed frequency of frequency is wanted.
1505
+ :type r: int
1506
+ :rtype: float
1507
+ """
1508
+
1509
+ # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
1510
+ # relationship)
1511
+ # Estimate a and b by simple linear regression technique on
1512
+ # the logarithmic form of the equation: log Nr = a + b*log(r)
1513
+
1514
+ return math.exp(self._intercept + self._slope * math.log(r))
1515
+
1516
+ def prob(self, sample):
1517
+ """
1518
+ Return the sample's probability.
1519
+
1520
+ :param sample: sample of the event
1521
+ :type sample: str
1522
+ :rtype: float
1523
+ """
1524
+ count = self._freqdist[sample]
1525
+ p = self._prob_measure(count)
1526
+ if count == 0:
1527
+ if self._bins == self._freqdist.B():
1528
+ p = 0.0
1529
+ else:
1530
+ p = p / (self._bins - self._freqdist.B())
1531
+ else:
1532
+ p = p * self._renormal
1533
+ return p
1534
+
1535
+ def _prob_measure(self, count):
1536
+ if count == 0 and self._freqdist.N() == 0:
1537
+ return 1.0
1538
+ elif count == 0 and self._freqdist.N() != 0:
1539
+ return self._freqdist.Nr(1) / self._freqdist.N()
1540
+
1541
+ if self._switch_at > count:
1542
+ Er_1 = self._freqdist.Nr(count + 1)
1543
+ Er = self._freqdist.Nr(count)
1544
+ else:
1545
+ Er_1 = self.smoothedNr(count + 1)
1546
+ Er = self.smoothedNr(count)
1547
+
1548
+ r_star = (count + 1) * Er_1 / Er
1549
+ return r_star / self._freqdist.N()
1550
+
1551
+ def check(self):
1552
+ # Seen mass (renormalized) plus the reserved unseen mass should sum to ~1.
1553
+ r_Nr = self._freqdist.r_Nr()
1554
+ prob_sum = self._prob_measure(0) + sum(
1555
+ nr * self._prob_measure(r) * self._renormal for r, nr in r_Nr.items() if r > 0)
1556
+ print("Probability Sum:", prob_sum)
1557
+
1558
+ def discount(self):
1559
+ """
1560
+ This function returns the total mass of probability transfers from the
1561
+ seen samples to the unseen samples.
1562
+ """
1563
+ return self.smoothedNr(1) / self._freqdist.N()
1564
+
1565
+ def max(self):
1566
+ return self._freqdist.max()
1567
+
1568
+ def samples(self):
1569
+ return self._freqdist.keys()
1570
+
1571
+ def freqdist(self):
1572
+ return self._freqdist
1573
+
1574
+ def __repr__(self):
1575
+ """
1576
+ Return a string representation of this ``ProbDist``.
1577
+
1578
+ :rtype: str
1579
+ """
1580
+ return "<SimpleGoodTuringProbDist based on %d samples>" % self._freqdist.N()
1581
+
1582
+
1583
+ class MutableProbDist(ProbDistI):
1584
+ """
1585
+ A mutable probdist where the probabilities may be easily modified. This
1586
+ simply copies an existing probdist, storing the probability values in a
1587
+ mutable dictionary and providing an update method.
1588
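+
+ For example (building on a hypothetical uniform distribution):
+
+ >>> from nltk.probability import UniformProbDist, MutableProbDist
+ >>> mpd = MutableProbDist(UniformProbDist(['a', 'b']), ['a', 'b'])
+ >>> mpd.update('a', 0.75, log=False)
+ >>> mpd.update('b', 0.25, log=False)
+ >>> round(mpd.prob('a'), 2)
+ 0.75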
+ """
1589
+
1590
+ def __init__(self, prob_dist, samples, store_logs=True):
1591
+ """
1592
+ Creates the mutable probdist based on the given prob_dist and using
1593
+ the list of samples given. These values are stored as log
1594
+ probabilities if the store_logs flag is set.
1595
+
1596
+ :param prob_dist: the distribution from which to garner the
1597
+ probabilities
1598
+ :type prob_dist: ProbDist
1599
+ :param samples: the complete set of samples
1600
+ :type samples: sequence of any
1601
+ :param store_logs: whether to store the probabilities as logarithms
1602
+ :type store_logs: bool
1603
+ """
1604
+ self._samples = samples
1605
+ self._sample_dict = {samples[i]: i for i in range(len(samples))}
1606
+ self._data = array.array("d", [0.0]) * len(samples)
1607
+ for i in range(len(samples)):
1608
+ if store_logs:
1609
+ self._data[i] = prob_dist.logprob(samples[i])
1610
+ else:
1611
+ self._data[i] = prob_dist.prob(samples[i])
1612
+ self._logs = store_logs
1613
+
1614
+ def max(self):
1615
+ # inherit documentation
1616
+ return max((p, v) for (v, p) in self._sample_dict.items())[1]
1617
+
1618
+ def samples(self):
1619
+ # inherit documentation
1620
+ return self._samples
1621
+
1622
+ def prob(self, sample):
1623
+ # inherit documentation
1624
+ i = self._sample_dict.get(sample)
1625
+ if i is None:
1626
+ return 0.0
1627
+ return 2 ** (self._data[i]) if self._logs else self._data[i]
1628
+
1629
+ def logprob(self, sample):
1630
+ # inherit documentation
1631
+ i = self._sample_dict.get(sample)
1632
+ if i is None:
1633
+ return float("-inf")
1634
+ return self._data[i] if self._logs else math.log(self._data[i], 2)
1635
+
1636
+ def update(self, sample, prob, log=True):
1637
+ """
1638
+ Update the probability for the given sample. This may cause the object
1639
+ to stop being a valid probability distribution - the user must
1640
+ ensure that they update the sample probabilities such that all samples
1641
+ have probabilities between 0 and 1 and that all probabilities sum to
1642
+ one.
1643
+
1644
+ :param sample: the sample for which to update the probability
1645
+ :type sample: any
1646
+ :param prob: the new probability
1647
+ :type prob: float
1648
+ :param log: is the probability already logged
1649
+ :type log: bool
1650
+ """
1651
+ i = self._sample_dict.get(sample)
1652
+ assert i is not None
1653
+ if self._logs:
1654
+ self._data[i] = prob if log else math.log(prob, 2)
1655
+ else:
1656
+ self._data[i] = 2 ** (prob) if log else prob
1657
+
1658
+
1659
+ ##//////////////////////////////////////////////////////
1660
+ ## Kneser-Ney Probability Distribution
1661
+ ##//////////////////////////////////////////////////////
1662
+
1663
+ # This method for calculating probabilities was introduced in 1995 by Reinhard
1664
+ # Kneser and Hermann Ney. It was meant to improve the accuracy of language
1665
+ # models that use backing-off to deal with sparse data. The authors propose two
1666
+ # ways of doing so: a marginal distribution constraint on the back-off
1667
+ # distribution and a leave-one-out distribution. For a start, the first one is
1668
+ # implemented as a class below.
1669
+ #
1670
+ # The idea behind a back-off n-gram model is that we have a series of
1671
+ # frequency distributions for our n-grams so that in case we have not seen a
1672
+ # given n-gram during training (and as a result have a 0 probability for it) we
1673
+ # can 'back off' (hence the name!) and try testing whether we've seen the
1674
+ # n-1-gram part of the n-gram in training.
1675
+ #
1676
+ # The novelty of Kneser and Ney's approach was that they decided to fiddle
1677
+ # around with the way this latter, backed off probability was being calculated
1678
+ # whereas their peers seemed to focus on the primary probability.
1679
+ #
1680
+ # The implementation below uses one of the techniques described in their paper
1681
+ # titled "Improved backing-off for n-gram language modeling." In the same paper
1682
+ # another technique is introduced to attempt to smooth the back-off
1683
+ # distribution as well as the primary one. There is also a much-cited
1684
+ # modification of this method proposed by Chen and Goodman.
1685
+ #
1686
+ # In order for the implementation of Kneser-Ney to be more efficient, some
1687
+ # changes have been made to the original algorithm. Namely, the calculation of
1688
+ # the normalizing function gamma has been significantly simplified and
1689
+ # combined slightly differently with beta. None of these changes affect the
1690
+ # nature of the algorithm, but instead aim to cut out unnecessary calculations
1691
+ # and take advantage of storing and retrieving information in dictionaries
1692
+ # where possible.
1693
+
1694
+
1695
+ class KneserNeyProbDist(ProbDistI):
1696
+ """
1697
+ Kneser-Ney estimate of a probability distribution. This is a version of
1698
+ back-off that estimates how likely an n-gram is, given that its n-1-gram
1699
+ prefix has been seen in training. Extends the ProbDistI interface; requires a
1700
+ trigram FreqDist instance to train on. Optionally, a discount value other than
1701
+ the default can be specified. The default discount is set to 0.75.
1702
+
1703
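+ A small sketch (hypothetical trigram counts, default discount 0.75):
+
+ >>> from nltk.probability import FreqDist, KneserNeyProbDist
+ >>> trigrams = FreqDist([('a', 'b', 'c'), ('a', 'b', 'c'), ('a', 'b', 'd')])
+ >>> kn = KneserNeyProbDist(trigrams)
+ >>> round(kn.prob(('a', 'b', 'c')), 4)  # (2 - 0.75) / 3
+ 0.4167
+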
+ """
1704
+
1705
+ def __init__(self, freqdist, bins=None, discount=0.75):
1706
+ """
1707
+ :param freqdist: The trigram frequency distribution upon which to base
1708
+ the estimation
1709
+ :type freqdist: FreqDist
1710
+ :param bins: Included for compatibility with nltk.tag.hmm
1711
+ :type bins: int or float
1712
+ :param discount: The discount applied when retrieving counts of
1713
+ trigrams
1714
+ :type discount: float (preferred, but can be set to int)
1715
+ """
1716
+
1717
+ if not bins:
1718
+ self._bins = freqdist.B()
1719
+ else:
1720
+ self._bins = bins
1721
+ self._D = discount
1722
+
1723
+ # cache for probability calculation
1724
+ self._cache = {}
1725
+
1726
+ # internal bigram and trigram frequency distributions
1727
+ self._bigrams = defaultdict(int)
1728
+ self._trigrams = freqdist
1729
+
1730
+ # helper dictionaries used to calculate probabilities
1731
+ self._wordtypes_after = defaultdict(float)
1732
+ self._trigrams_contain = defaultdict(float)
1733
+ self._wordtypes_before = defaultdict(float)
1734
+ for w0, w1, w2 in freqdist:
1735
+ self._bigrams[(w0, w1)] += freqdist[(w0, w1, w2)]
1736
+ self._wordtypes_after[(w0, w1)] += 1
1737
+ self._trigrams_contain[w1] += 1
1738
+ self._wordtypes_before[(w1, w2)] += 1
1739
+
1740
+ def prob(self, trigram):
1741
+ # sample must be a triple
1742
+ if len(trigram) != 3:
1743
+ raise ValueError("Expected an iterable with 3 members.")
1744
+ trigram = tuple(trigram)
1745
+ w0, w1, w2 = trigram
1746
+
1747
+ if trigram in self._cache:
1748
+ return self._cache[trigram]
1749
+ else:
1750
+ # if the sample trigram was seen during training
1751
+ if trigram in self._trigrams:
1752
+ prob = (self._trigrams[trigram] - self.discount()) / self._bigrams[
1753
+ (w0, w1)
1754
+ ]
1755
+
1756
+ # else if the 'rougher' environment was seen during training
1757
+ elif (w0, w1) in self._bigrams and (w1, w2) in self._wordtypes_before:
1758
+ aftr = self._wordtypes_after[(w0, w1)]
1759
+ bfr = self._wordtypes_before[(w1, w2)]
1760
+
1761
+ # the probability left over from alphas
1762
+ leftover_prob = (aftr * self.discount()) / self._bigrams[(w0, w1)]
1763
+
1764
+ # the beta (including normalization)
1765
+ beta = bfr / (self._trigrams_contain[w1] - aftr)
1766
+
1767
+ prob = leftover_prob * beta
1768
+
1769
+ # else the sample was completely unseen during training
1770
+ else:
1771
+ prob = 0.0
1772
+
1773
+ self._cache[trigram] = prob
1774
+ return prob
1775
+
1776
+ def discount(self):
1777
+ """
1778
+ Return the value by which counts are discounted. By default set to 0.75.
1779
+
1780
+ :rtype: float
1781
+ """
1782
+ return self._D
1783
+
1784
+ def set_discount(self, discount):
1785
+ """
1786
+ Set the value by which counts are discounted to the value of discount.
1787
+
1788
+ :param discount: the new value to discount counts by
1789
+ :type discount: float (preferred, but int possible)
1790
+ :rtype: None
1791
+ """
1792
+ self._D = discount
1793
+
1794
+ def samples(self):
1795
+ return self._trigrams.keys()
1796
+
1797
+ def max(self):
1798
+ return self._trigrams.max()
1799
+
1800
+ def __repr__(self):
1801
+ """
1802
+ Return a string representation of this ProbDist
1803
+
1804
+ :rtype: str
1805
+ """
1806
+ return f"<KneserNeyProbDist based on {self._trigrams.N()} trigrams"
1807
+
1808
+
1809
+ ##//////////////////////////////////////////////////////
1810
+ ## Probability Distribution Operations
1811
+ ##//////////////////////////////////////////////////////
1812
+
1813
+
1814
+ def log_likelihood(test_pdist, actual_pdist):
1815
+ if not isinstance(test_pdist, ProbDistI) or not isinstance(actual_pdist, ProbDistI):
1816
+ raise ValueError("expected a ProbDist.")
1817
+ # Is this right?
1818
+ return sum(
1819
+ actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2) for s in actual_pdist
1820
+ )
1821
+
1822
+
1823
+ def entropy(pdist):
1824
+ probs = (pdist.prob(s) for s in pdist.samples())
1825
+ return -sum(p * math.log(p, 2) for p in probs)
1826
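+ # For example, a distribution over two equiprobable samples carries
+ # -2 * (0.5 * log2(0.5)) = 1.0 bit of entropy, e.g. (hypothetical data)
+ # entropy(MLEProbDist(FreqDist('ht'))) == 1.0.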
+
1827
+
1828
+ ##//////////////////////////////////////////////////////
1829
+ ## Conditional Distributions
1830
+ ##//////////////////////////////////////////////////////
1831
+
1832
+
1833
+ class ConditionalFreqDist(defaultdict):
1834
+ """
1835
+ A collection of frequency distributions for a single experiment
1836
+ run under different conditions. Conditional frequency
1837
+ distributions are used to record the number of times each sample
1838
+ occurred, given the condition under which the experiment was run.
1839
+ For example, a conditional frequency distribution could be used to
1840
+ record the frequency of each word (type) in a document, given its
1841
+ length. Formally, a conditional frequency distribution can be
1842
+ defined as a function that maps from each condition to the
1843
+ FreqDist for the experiment under that condition.
1844
+
1845
+ Conditional frequency distributions are typically constructed by
1846
+ repeatedly running an experiment under a variety of conditions,
1847
+ and incrementing the sample outcome counts for the appropriate
1848
+ conditions. For example, the following code will produce a
1849
+ conditional frequency distribution that encodes how often each
1850
+ word type occurs, given the length of that word type:
1851
+
1852
+ >>> from nltk.probability import ConditionalFreqDist
1853
+ >>> from nltk.tokenize import word_tokenize
1854
+ >>> sent = "the the the dog dog some other words that we do not care about"
1855
+ >>> cfdist = ConditionalFreqDist()
1856
+ >>> for word in word_tokenize(sent):
1857
+ ... condition = len(word)
1858
+ ... cfdist[condition][word] += 1
1859
+
1860
+ An equivalent way to do this is with the initializer:
1861
+
1862
+ >>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))
1863
+
1864
+ The frequency distribution for each condition is accessed using
1865
+ the indexing operator:
1866
+
1867
+ >>> cfdist[3]
1868
+ FreqDist({'the': 3, 'dog': 2, 'not': 1})
1869
+ >>> cfdist[3].freq('the')
1870
+ 0.5
1871
+ >>> cfdist[3]['dog']
1872
+ 2
1873
+
1874
+ When the indexing operator is used to access the frequency
1875
+ distribution for a condition that has not been accessed before,
1876
+ ``ConditionalFreqDist`` creates a new empty FreqDist for that
1877
+ condition.
1878
+
1879
+ """
1880
+
1881
+ def __init__(self, cond_samples=None):
1882
+ """
1883
+ Construct a new empty conditional frequency distribution. In
1884
+ particular, the count for every sample, under every condition,
1885
+ is zero.
1886
+
1887
+ :param cond_samples: The samples to initialize the conditional
1888
+ frequency distribution with
1889
+ :type cond_samples: Sequence of (condition, sample) tuples
1890
+ """
1891
+ defaultdict.__init__(self, FreqDist)
1892
+
1893
+ if cond_samples:
1894
+ for (cond, sample) in cond_samples:
1895
+ self[cond][sample] += 1
1896
+
1897
+ def __reduce__(self):
1898
+ kv_pairs = ((cond, self[cond]) for cond in self.conditions())
1899
+ return (self.__class__, (), None, None, kv_pairs)
1900
+
1901
+ def conditions(self):
1902
+ """
1903
+ Return a list of the conditions that have been accessed for
1904
+ this ``ConditionalFreqDist``. Use the indexing operator to
1905
+ access the frequency distribution for a given condition.
1906
+ Note that the frequency distributions for some conditions
1907
+ may contain zero sample outcomes.
1908
+
1909
+ :rtype: list
1910
+ """
1911
+ return list(self.keys())
1912
+
1913
+ def N(self):
1914
+ """
1915
+ Return the total number of sample outcomes that have been
1916
+ recorded by this ``ConditionalFreqDist``.
1917
+
1918
+ :rtype: int
1919
+ """
1920
+ return sum(fdist.N() for fdist in self.values())
1921
+
1922
+ def plot(
1923
+ self,
1924
+ *args,
1925
+ samples=None,
1926
+ title="",
1927
+ cumulative=False,
1928
+ percents=False,
1929
+ conditions=None,
1930
+ show=True,
1931
+ **kwargs,
1932
+ ):
1933
+ """
1934
+ Plot the given samples from the conditional frequency distribution.
1935
+ For a cumulative plot, specify cumulative=True. Additional ``*args`` and
1936
+ ``**kwargs`` are passed to matplotlib's plot function.
1937
+ (Requires Matplotlib to be installed.)
1938
+
1939
+ :param samples: The samples to plot
1940
+ :type samples: list
1941
+ :param title: The title for the graph
1942
+ :type title: str
1943
+ :param cumulative: Whether the plot is cumulative. (default = False)
1944
+ :type cumulative: bool
1945
+ :param percents: Whether the plot uses percents instead of counts. (default = False)
1946
+ :type percents: bool
1947
+ :param conditions: The conditions to plot (default is all)
1948
+ :type conditions: list
1949
+ :param show: Whether to show the plot, or only return the ax.
1950
+ :type show: bool
1951
+ """
1952
+ try:
1953
+ import matplotlib.pyplot as plt # import statement fix
1954
+ except ImportError as e:
1955
+ raise ValueError(
1956
+ "The plot function requires matplotlib to be installed."
1957
+ "See https://matplotlib.org/"
1958
+ ) from e
1959
+
1960
+ if not conditions:
1961
+ conditions = self.conditions()
1962
+ else:
1963
+ conditions = [c for c in conditions if c in self]
1964
+ if not samples:
1965
+ samples = sorted({v for c in conditions for v in self[c]})
1966
+ if "linewidth" not in kwargs:
1967
+ kwargs["linewidth"] = 2
1968
+ ax = plt.gca()
1969
+ if conditions:
1970
+ freqs = []
1971
+ for condition in conditions:
1972
+ if cumulative:
1973
+ # freqs should be a list of lists, where each sub-list holds the frequencies for one condition
1974
+ freq = list(self[condition]._cumulative_frequencies(samples))
1975
+ else:
1976
+ freq = [self[condition][sample] for sample in samples]
1977
+
1978
+ if percents:
1979
+ freq = [f / self[condition].N() * 100 for f in freq]
1980
+
1981
+ freqs.append(freq)
1982
+
1983
+ if cumulative:
1984
+ ylabel = "Cumulative "
1985
+ legend_loc = "lower right"
1986
+ else:
1987
+ ylabel = ""
1988
+ legend_loc = "upper right"
1989
+
1990
+ if percents:
1991
+ ylabel += "Percents"
1992
+ else:
1993
+ ylabel += "Counts"
1994
+
1995
+ i = 0
1996
+ for freq in freqs:
1997
+ kwargs["label"] = conditions[i] # label for each condition
1998
+ i += 1
1999
+ ax.plot(freq, *args, **kwargs)
2000
+ ax.legend(loc=legend_loc)
2001
+ ax.grid(True, color="silver")
2002
+ ax.set_xticks(range(len(samples)))
2003
+ ax.set_xticklabels([str(s) for s in samples], rotation=90)
2004
+ if title:
2005
+ ax.set_title(title)
2006
+ ax.set_xlabel("Samples")
2007
+ ax.set_ylabel(ylabel)
2008
+
2009
+ if show:
2010
+ plt.show()
2011
+
2012
+ return ax
2013
+
2014
+ def tabulate(self, *args, **kwargs):
2015
+ """
2016
+ Tabulate the given samples from the conditional frequency distribution.
2017
+
2018
+ :param samples: The samples to plot
2019
+ :type samples: list
2020
+ :param conditions: The conditions to plot (default is all)
2021
+ :type conditions: list
2022
+ :param cumulative: A flag to specify whether the freqs are cumulative (default = False)
2023
+ :type cumulative: bool
2024
+ """
2025
+
2026
+ cumulative = _get_kwarg(kwargs, "cumulative", False)
2027
+ conditions = _get_kwarg(kwargs, "conditions", sorted(self.conditions()))
2028
+ samples = _get_kwarg(
2029
+ kwargs,
2030
+ "samples",
2031
+ sorted({v for c in conditions if c in self for v in self[c]}),
2032
+ ) # this computation could be wasted
2033
+
2034
+ width = max(len("%s" % s) for s in samples)
2035
+ freqs = dict()
2036
+ for c in conditions:
2037
+ if cumulative:
2038
+ freqs[c] = list(self[c]._cumulative_frequencies(samples))
2039
+ else:
2040
+ freqs[c] = [self[c][sample] for sample in samples]
2041
+ width = max(width, max(len("%d" % f) for f in freqs[c]))
2042
+
2043
+ condition_size = max(len("%s" % c) for c in conditions)
2044
+ print(" " * condition_size, end=" ")
2045
+ for s in samples:
2046
+ print("%*s" % (width, s), end=" ")
2047
+ print()
2048
+ for c in conditions:
2049
+ print("%*s" % (condition_size, c), end=" ")
2050
+ for f in freqs[c]:
2051
+ print("%*d" % (width, f), end=" ")
2052
+ print()
2053
+
2054
+ # Mathematical operators
2055
+
2056
+ def __add__(self, other):
2057
+ """
2058
+ Add counts from two ConditionalFreqDists.
2059
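+
+ For example (toy conditions and counts):
+
+ >>> from nltk.probability import ConditionalFreqDist
+ >>> a = ConditionalFreqDist([(1, 'x'), (1, 'x'), (2, 'y')])
+ >>> b = ConditionalFreqDist([(1, 'x'), (3, 'z')])
+ >>> (a + b)[1]['x']
+ 3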
+ """
2060
+ if not isinstance(other, ConditionalFreqDist):
2061
+ return NotImplemented
2062
+ result = self.copy()
2063
+ for cond in other.conditions():
2064
+ result[cond] += other[cond]
2065
+ return result
2066
+
2067
+ def __sub__(self, other):
2068
+ """
2069
+ Subtract counts, but keep only results with positive counts.
2070
+ """
2071
+ if not isinstance(other, ConditionalFreqDist):
2072
+ return NotImplemented
2073
+ result = self.copy()
2074
+ for cond in other.conditions():
2075
+ result[cond] -= other[cond]
2076
+ if not result[cond]:
2077
+ del result[cond]
2078
+ return result
2079
+
2080
+ def __or__(self, other):
2081
+ """
2082
+ Union is the maximum of the corresponding values in either input counter.
2083
+ """
2084
+ if not isinstance(other, ConditionalFreqDist):
2085
+ return NotImplemented
2086
+ result = self.copy()
2087
+ for cond in other.conditions():
2088
+ result[cond] |= other[cond]
2089
+ return result
2090
+
2091
+ def __and__(self, other):
2092
+ """
2093
+ Intersection is the minimum of corresponding counts.
2094
+ """
2095
+ if not isinstance(other, ConditionalFreqDist):
2096
+ return NotImplemented
2097
+ result = ConditionalFreqDist()
2098
+ for cond in self.conditions():
2099
+ newfreqdist = self[cond] & other[cond]
2100
+ if newfreqdist:
2101
+ result[cond] = newfreqdist
2102
+ return result
2103
+
2104
+ # @total_ordering doesn't work here, since the class inherits from a builtin class
2105
+ def __le__(self, other):
2106
+ if not isinstance(other, ConditionalFreqDist):
2107
+ raise_unorderable_types("<=", self, other)
2108
+ return set(self.conditions()).issubset(other.conditions()) and all(
2109
+ self[c] <= other[c] for c in self.conditions()
2110
+ )
2111
+
2112
+ def __lt__(self, other):
2113
+ if not isinstance(other, ConditionalFreqDist):
2114
+ raise_unorderable_types("<", self, other)
2115
+ return self <= other and self != other
2116
+
2117
+ def __ge__(self, other):
2118
+ if not isinstance(other, ConditionalFreqDist):
2119
+ raise_unorderable_types(">=", self, other)
2120
+ return other <= self
2121
+
2122
+ def __gt__(self, other):
2123
+ if not isinstance(other, ConditionalFreqDist):
2124
+ raise_unorderable_types(">", self, other)
2125
+ return other < self
2126
+
2127
+ def deepcopy(self):
2128
+ from copy import deepcopy
2129
+
2130
+ return deepcopy(self)
2131
+
2132
+ copy = deepcopy
2133
+
2134
+ def __repr__(self):
2135
+ """
2136
+ Return a string representation of this ``ConditionalFreqDist``.
2137
+
2138
+ :rtype: str
2139
+ """
2140
+ return "<ConditionalFreqDist with %d conditions>" % len(self)
2141
+
2142
+
2143
+ class ConditionalProbDistI(dict, metaclass=ABCMeta):
2144
+ """
2145
+ A collection of probability distributions for a single experiment
2146
+ run under different conditions. Conditional probability
2147
+ distributions are used to estimate the likelihood of each sample,
2148
+ given the condition under which the experiment was run. For
2149
+ example, a conditional probability distribution could be used to
2150
+ estimate the probability of each word type in a document, given
2151
+ the length of the word type. Formally, a conditional probability
2152
+ distribution can be defined as a function that maps from each
2153
+ condition to the ``ProbDist`` for the experiment under that
2154
+ condition.
2155
+ """
2156
+
2157
+ @abstractmethod
2158
+ def __init__(self):
2159
+ """
2160
+ Classes inheriting from ConditionalProbDistI should implement __init__.
2161
+ """
2162
+
2163
+ def conditions(self):
2164
+ """
2165
+ Return a list of the conditions that are represented by
2166
+ this ``ConditionalProbDist``. Use the indexing operator to
2167
+ access the probability distribution for a given condition.
2168
+
2169
+ :rtype: list
2170
+ """
2171
+ return list(self.keys())
2172
+
2173
+ def __repr__(self):
2174
+ """
2175
+ Return a string representation of this ``ConditionalProbDist``.
2176
+
2177
+ :rtype: str
2178
+ """
2179
+ return "<%s with %d conditions>" % (type(self).__name__, len(self))
2180
+
2181
+
2182
+class ConditionalProbDist(ConditionalProbDistI):
+    """
+    A conditional probability distribution modeling the experiments
+    that were used to generate a conditional frequency distribution.
+    A ConditionalProbDist is constructed from a
+    ``ConditionalFreqDist`` and a ``ProbDist`` factory:
+
+    - The ``ConditionalFreqDist`` specifies the frequency
+      distribution for each condition.
+    - The ``ProbDist`` factory is a function that takes a
+      condition's frequency distribution, and returns its
+      probability distribution.  A ``ProbDist`` class's name (such as
+      ``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify
+      that class's constructor.
+
+    The first argument to the ``ProbDist`` factory is the frequency
+    distribution that it should model; and the remaining arguments are
+    specified by the ``factory_args`` parameter to the
+    ``ConditionalProbDist`` constructor.  For example, the following
+    code constructs a ``ConditionalProbDist``, where the probability
+    distribution for each condition is an ``ELEProbDist`` with 10 bins:
+
+    >>> from nltk.corpus import brown
+    >>> from nltk.probability import ConditionalFreqDist
+    >>> from nltk.probability import ConditionalProbDist, ELEProbDist
+    >>> cfdist = ConditionalFreqDist(brown.tagged_words()[:5000])
+    >>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
+    >>> cpdist['passed'].max()
+    'VBD'
+    >>> cpdist['passed'].prob('VBD') #doctest: +ELLIPSIS
+    0.423...
+
+    """
+
+    def __init__(self, cfdist, probdist_factory, *factory_args, **factory_kw_args):
+        """
+        Construct a new conditional probability distribution, based on
+        the given conditional frequency distribution and ``ProbDist``
+        factory.
+
+        :type cfdist: ConditionalFreqDist
+        :param cfdist: The ``ConditionalFreqDist`` specifying the
+            frequency distribution for each condition.
+        :type probdist_factory: class or function
+        :param probdist_factory: The function or class that maps
+            a condition's frequency distribution to its probability
+            distribution.  The function is called with the frequency
+            distribution as its first argument,
+            ``factory_args`` as its remaining arguments, and
+            ``factory_kw_args`` as keyword arguments.
+        :type factory_args: (any)
+        :param factory_args: Extra arguments for ``probdist_factory``.
+            These arguments are usually used to specify extra
+            properties for the probability distributions of individual
+            conditions, such as the number of bins they contain.
+        :type factory_kw_args: (any)
+        :param factory_kw_args: Extra keyword arguments for ``probdist_factory``.
+        """
+        self._probdist_factory = probdist_factory
+        self._factory_args = factory_args
+        self._factory_kw_args = factory_kw_args
+
+        for condition in cfdist:
+            self[condition] = probdist_factory(
+                cfdist[condition], *factory_args, **factory_kw_args
+            )
+
+    def __missing__(self, key):
+        self[key] = self._probdist_factory(
+            FreqDist(), *self._factory_args, **self._factory_kw_args
+        )
+        return self[key]
+
+
+class DictionaryConditionalProbDist(ConditionalProbDistI):
+    """
+    An alternative ConditionalProbDist that simply wraps a dictionary of
+    ProbDists rather than creating these from FreqDists.
+    """
+
+    def __init__(self, probdist_dict):
+        """
+        :param probdist_dict: a dictionary containing the probdists indexed
+            by the conditions
+        :type probdist_dict: dict any -> probdist
+        """
+        self.update(probdist_dict)
+
+    def __missing__(self, key):
+        self[key] = DictionaryProbDist()
+        return self[key]
+
+
+##//////////////////////////////////////////////////////
+##  Adding in log-space.
+##//////////////////////////////////////////////////////
+
+# If the difference is bigger than this, then just take the bigger one:
+_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)
+
+
+def add_logs(logx, logy):
+    """
+    Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
+    *log(x+y)*.  Conceptually, this is the same as returning
+    ``log(2**(logx)+2**(logy))``, but the actual implementation
+    avoids overflow errors that could result from direct computation.
+    """
+    if logx < logy + _ADD_LOGS_MAX_DIFF:
+        return logy
+    if logy < logx + _ADD_LOGS_MAX_DIFF:
+        return logx
+    base = min(logx, logy)
+    return base + math.log(2 ** (logx - base) + 2 ** (logy - base), 2)
+
+
+def sum_logs(logs):
+    return reduce(add_logs, logs[1:], logs[0]) if len(logs) != 0 else _NINF
+
+
+##//////////////////////////////////////////////////////
+##  Probabilistic Mix-in
+##//////////////////////////////////////////////////////
+
+
+class ProbabilisticMixIn:
+    """
+    A mix-in class to associate probabilities with other classes
+    (trees, rules, etc.).  To use the ``ProbabilisticMixIn`` class,
+    define a new class that derives from an existing class and from
+    ProbabilisticMixIn.  You will need to define a new constructor for
+    the new class, which explicitly calls the constructors of both its
+    parent classes.  For example:
+
+    >>> from nltk.probability import ProbabilisticMixIn
+    >>> class A:
+    ...     def __init__(self, x, y): self.data = (x,y)
+    ...
+    >>> class ProbabilisticA(A, ProbabilisticMixIn):
+    ...     def __init__(self, x, y, **prob_kwarg):
+    ...         A.__init__(self, x, y)
+    ...         ProbabilisticMixIn.__init__(self, **prob_kwarg)
+
+    See the documentation for the ProbabilisticMixIn
+    ``constructor<__init__>`` for information about the arguments it
+    expects.
+
+    You should generally also redefine the string representation
+    methods, the comparison methods, and the hashing method.
+    """
+
+    def __init__(self, **kwargs):
+        """
+        Initialize this object's probability.  This initializer should
+        be called by subclass constructors.  ``prob`` should generally be
+        the first argument for those constructors.
+
+        :param prob: The probability associated with the object.
+        :type prob: float
+        :param logprob: The log of the probability associated with
+            the object.
+        :type logprob: float
+        """
+        if "prob" in kwargs:
+            if "logprob" in kwargs:
+                raise TypeError("Must specify either prob or logprob (not both)")
+            else:
+                ProbabilisticMixIn.set_prob(self, kwargs["prob"])
+        elif "logprob" in kwargs:
+            ProbabilisticMixIn.set_logprob(self, kwargs["logprob"])
+        else:
+            self.__prob = self.__logprob = None
+
+    def set_prob(self, prob):
+        """
+        Set the probability associated with this object to ``prob``.
+
+        :param prob: The new probability
+        :type prob: float
+        """
+        self.__prob = prob
+        self.__logprob = None
+
+    def set_logprob(self, logprob):
+        """
+        Set the log probability associated with this object to
+        ``logprob``.  I.e., set the probability associated with this
+        object to ``2**(logprob)``.
+
+        :param logprob: The new log probability
+        :type logprob: float
+        """
+        self.__logprob = logprob
+        self.__prob = None
+
+    def prob(self):
+        """
+        Return the probability associated with this object.
+
+        :rtype: float
+        """
+        if self.__prob is None:
+            if self.__logprob is None:
+                return None
+            self.__prob = 2 ** (self.__logprob)
+        return self.__prob
+
+    def logprob(self):
+        """
+        Return ``log(p)``, where ``p`` is the probability associated
+        with this object.
+
+        :rtype: float
+        """
+        if self.__logprob is None:
+            if self.__prob is None:
+                return None
+            self.__logprob = math.log(self.__prob, 2)
+        return self.__logprob
+
+
+class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
+    def set_prob(self, prob):
+        raise ValueError("%s is immutable" % self.__class__.__name__)
+
+    def set_logprob(self, prob):
+        raise ValueError("%s is immutable" % self.__class__.__name__)
+
+
+## Helper function for processing keyword arguments
+
+
+def _get_kwarg(kwargs, key, default):
+    if key in kwargs:
+        arg = kwargs[key]
+        del kwargs[key]
+    else:
+        arg = default
+    return arg
+
+
+##//////////////////////////////////////////////////////
+##  Demonstration
+##//////////////////////////////////////////////////////
+
+
+def _create_rand_fdist(numsamples, numoutcomes):
+    """
+    Create a new frequency distribution, with random samples.  The
+    samples are numbers from 1 to ``numsamples``, and are generated by
+    summing two numbers, each of which has a uniform distribution.
+    """
+
+    fdist = FreqDist()
+    for x in range(numoutcomes):
+        y = random.randint(1, (1 + numsamples) // 2) + random.randint(
+            0, numsamples // 2
+        )
+        fdist[y] += 1
+    return fdist
+
+
+def _create_sum_pdist(numsamples):
+    """
+    Return the true probability distribution for the experiment
+    ``_create_rand_fdist(numsamples, x)``.
+    """
+    fdist = FreqDist()
+    for x in range(1, (1 + numsamples) // 2 + 1):
+        for y in range(0, numsamples // 2 + 1):
+            fdist[x + y] += 1
+    return MLEProbDist(fdist)
+
+
+def demo(numsamples=6, numoutcomes=500):
+    """
+    A demonstration of frequency distributions and probability
+    distributions.  This demonstration creates three frequency
+    distributions by sampling a random process with ``numsamples``
+    possible samples; each frequency distribution records
+    ``numoutcomes`` outcomes.  These three frequency distributions
+    are then used to build seven probability distributions.
+    Finally, the probability estimates of these distributions are
+    compared to the actual probability of each sample.
+
+    :type numsamples: int
+    :param numsamples: The number of samples to use in each demo
+        frequency distribution.
+    :type numoutcomes: int
+    :param numoutcomes: The total number of outcomes for each
+        demo frequency distribution.  These outcomes are divided into
+        ``numsamples`` bins.
+    :rtype: None
+    """
+
+    # Randomly sample a stochastic process three times.
+    fdist1 = _create_rand_fdist(numsamples, numoutcomes)
+    fdist2 = _create_rand_fdist(numsamples, numoutcomes)
+    fdist3 = _create_rand_fdist(numsamples, numoutcomes)
+
+    # Use our samples to create probability distributions.
+    pdists = [
+        MLEProbDist(fdist1),
+        LidstoneProbDist(fdist1, 0.5, numsamples),
+        HeldoutProbDist(fdist1, fdist2, numsamples),
+        HeldoutProbDist(fdist2, fdist1, numsamples),
+        CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
+        SimpleGoodTuringProbDist(fdist1),
+        SimpleGoodTuringProbDist(fdist1, 7),
+        _create_sum_pdist(numsamples),
+    ]
+
+    # Find the probability of each sample.
+    vals = []
+    for n in range(1, numsamples + 1):
+        vals.append(tuple([n, fdist1.freq(n)] + [pdist.prob(n) for pdist in pdists]))
+
+    # Print the results in a formatted table.
+    print(
+        "%d samples (1-%d); %d outcomes were sampled for each FreqDist"
+        % (numsamples, numsamples, numoutcomes)
+    )
+    print("=" * 9 * (len(pdists) + 2))
+    FORMATSTR = "      FreqDist " + "%8s " * (len(pdists) - 1) + "|  Actual"
+    print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1]))
+    print("-" * 9 * (len(pdists) + 2))
+    FORMATSTR = "%3d   %8.6f " + "%8.6f " * (len(pdists) - 1) + "| %8.6f"
+    for val in vals:
+        print(FORMATSTR % val)
+
+    # Print the totals for each column (should all be 1.0)
+    zvals = list(zip(*vals))
+    sums = [sum(val) for val in zvals[1:]]
+    print("-" * 9 * (len(pdists) + 2))
+    FORMATSTR = "Total " + "%8.6f " * (len(pdists)) + "| %8.6f"
+    print(FORMATSTR % tuple(sums))
+    print("=" * 9 * (len(pdists) + 2))
+
+    # Display the distributions themselves, if they're short enough.
+    if len("%s" % fdist1) < 70:
+        print("  fdist1: %s" % fdist1)
+        print("  fdist2: %s" % fdist2)
+        print("  fdist3: %s" % fdist3)
+    print()
+
+    print("Generating:")
+    for pdist in pdists:
+        fdist = FreqDist(pdist.generate() for i in range(5000))
+        print("{:>20} {}".format(pdist.__class__.__name__[:20], ("%s" % fdist)[:55]))
+    print()
+
+
+def gt_demo():
+    from nltk import corpus
+
+    emma_words = corpus.gutenberg.words("austen-emma.txt")
+    fd = FreqDist(emma_words)
+    sgt = SimpleGoodTuringProbDist(fd)
+    print("{:>18} {:>8} {:>14}".format("word", "frequency", "SimpleGoodTuring"))
+    fd_keys_sorted = (
+        key for key, value in sorted(fd.items(), key=lambda item: item[1], reverse=True)
+    )
+    for key in fd_keys_sorted:
+        print("%18s %8d %14e" % (key, fd[key], sgt.prob(key)))
+
+
+if __name__ == "__main__":
+    demo(6, 10)
+    demo(5, 5000)
+    gt_demo()
+
+__all__ = [
+    "ConditionalFreqDist",
+    "ConditionalProbDist",
+    "ConditionalProbDistI",
+    "CrossValidationProbDist",
+    "DictionaryConditionalProbDist",
+    "DictionaryProbDist",
+    "ELEProbDist",
+    "FreqDist",
+    "SimpleGoodTuringProbDist",
+    "HeldoutProbDist",
+    "ImmutableProbabilisticMixIn",
+    "LaplaceProbDist",
+    "LidstoneProbDist",
+    "MLEProbDist",
+    "MutableProbDist",
+    "KneserNeyProbDist",
+    "ProbDistI",
+    "ProbabilisticMixIn",
+    "UniformProbDist",
+    "WittenBellProbDist",
+    "add_logs",
+    "log_likelihood",
+    "sum_logs",
+    "entropy",
+]
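
To see how the pieces above fit together: indexing a ``ConditionalProbDist`` on an
unseen condition goes through ``__missing__`` and yields a distribution built from an
empty ``FreqDist``, while ``add_logs`` and ``sum_logs`` combine base-2 log
probabilities without underflow. A minimal sketch using only names defined in this
module (the toy sentence is invented for the example):

    import math

    from nltk.probability import (
        ConditionalFreqDist,
        ConditionalProbDist,
        MLEProbDist,
        add_logs,
        sum_logs,
    )

    # Condition each word on its length.
    words = "the quick brown fox jumps over the lazy dog".split()
    cfd = ConditionalFreqDist((len(w), w) for w in words)
    cpd = ConditionalProbDist(cfd, MLEProbDist)

    print(cpd[3].prob("the"))   # 0.5: "the" is 2 of the 4 three-letter tokens
    print(cpd[99].prob("the"))  # 0.0: unseen condition, modeled from an empty FreqDist

    # add_logs/sum_logs operate on base-2 logs: log2(0.25 + 0.25) == log2(0.5)
    assert math.isclose(add_logs(math.log2(0.25), math.log2(0.25)), math.log2(0.5))
    assert math.isclose(sum_logs([math.log2(0.125)] * 4), math.log2(0.5))

The ``__missing__`` fallback is the design choice that lets downstream code such as
taggers query arbitrary conditions without guarding against ``KeyError``.
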
env-llmeval/lib/python3.10/site-packages/nltk/sem/__init__.py ADDED
@@ -0,0 +1,75 @@
+# Natural Language Toolkit: Semantic Interpretation
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Ewan Klein <[email protected]>
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+"""
+NLTK Semantic Interpretation Package
+
+This package contains classes for representing semantic structure in
+formulas of first-order logic and for evaluating such formulas in
+set-theoretic models.
+
+>>> from nltk.sem import logic
+>>> logic._counter._value = 0
+
+The package has two main components:
+
+- ``logic`` provides support for analyzing expressions of First
+  Order Logic (FOL).
+- ``evaluate`` allows users to recursively determine truth in a
+  model for formulas of FOL.
+
+A model consists of a domain of discourse and a valuation function,
+which assigns values to non-logical constants.  We assume that entities
+in the domain are represented as strings such as ``'b1'``, ``'g1'``,
+etc.  A ``Valuation`` is initialized with a list of (symbol, value)
+pairs, where values are entities, sets of entities or sets of tuples
+of entities.
+The domain of discourse can be inferred from the valuation, and a
+model is then created with the domain and valuation as parameters.
+
+>>> from nltk.sem import Valuation, Model
+>>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),
+... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])),
+... ('dog', set(['d1'])),
+... ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
+>>> val = Valuation(v)
+>>> dom = val.domain
+>>> m = Model(dom, val)
+"""
+
+from nltk.sem.boxer import Boxer
+from nltk.sem.drt import DRS, DrtExpression
+from nltk.sem.evaluate import (
+    Assignment,
+    Model,
+    Undefined,
+    Valuation,
+    arity,
+    is_rel,
+    read_valuation,
+    set2rel,
+)
+from nltk.sem.lfg import FStructure
+from nltk.sem.logic import (
+    ApplicationExpression,
+    Expression,
+    LogicalExpressionException,
+    Variable,
+    binding_ops,
+    boolean_ops,
+    equality_preds,
+    read_logic,
+)
+from nltk.sem.relextract import clause, extract_rels, rtuple
+from nltk.sem.skolemize import skolemize
+from nltk.sem.util import evaluate_sents, interpret_sents, parse_sents, root_semrep
+
+# from nltk.sem.glue import Glue
+# from nltk.sem.hole import HoleSemantics
+# from nltk.sem.cooper_storage import CooperStore
+
+# don't import chat80 as its names are too generic
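
Continuing the docstring's example, the resulting model can be queried for the truth
of first-order formulas. A minimal sketch; the empty ``Assignment`` and the
``Model.evaluate`` call follow standard ``nltk.sem.evaluate`` usage and are an
assumption here, since this file only re-exports those names:

    from nltk.sem import Assignment, Model, Valuation

    v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),
         ('girl', {'g1', 'g2'}), ('boy', {'b1', 'b2'}),
         ('dog', {'d1'}),
         ('love', {('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')})]
    val = Valuation(v)
    dom = val.domain       # domain of discourse inferred from the valuation
    m = Model(dom, val)
    g = Assignment(dom)    # an empty variable assignment

    print(m.evaluate('love(adam, betty)', g))                 # True: ('b1', 'g1') is in the relation
    print(m.evaluate('all x.(girl(x) -> love(x, adam))', g))  # True: g1 and g2 both love b1
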
env-llmeval/lib/python3.10/site-packages/nltk/sem/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.36 kB)