Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the complete change set.
- ckpts/universal/global_step40/zero/23.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/6.input_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/9.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/9.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/9.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
- ckpts/universal/global_step40/zero/9.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
- venv/lib/python3.10/site-packages/nltk/test/parse.doctest +933 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/__init__.py +0 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_brill.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pl196x.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tgrep.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/lm/__init__.py +0 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_counter.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_models.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_preprocessing.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_vocabulary.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/lm/test_counter.py +116 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/lm/test_models.py +610 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/lm/test_preprocessing.py +30 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/lm/test_vocabulary.py +156 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_aline.py +48 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_bllip.py +42 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_brill.py +34 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_cfd_mutation.py +39 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_cfg2chomsky.py +49 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_chunk.py +85 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_classify.py +49 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_collocations.py +120 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_concordance.py +98 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_corenlp.py +1436 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_corpora.py +274 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_corpus_views.py +48 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_data.py +15 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_disagreement.py +144 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_distance.py +129 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_downloader.py +19 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_freqdist.py +7 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_hmm.py +82 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_json2csv_corpus.py +210 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_json_serialization.py +95 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_metrics.py +66 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_naivebayes.py +21 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_nombank.py +27 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_pl196x.py +13 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_pos_tag.py +83 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_ribes.py +246 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_rte_classify.py +94 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_seekable_unicode_stream_reader.py +86 -0
- venv/lib/python3.10/site-packages/nltk/test/unit/test_senna.py +112 -0
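The ckpts/universal/global_step40/zero/* entries above are per-parameter optimizer-state shards (exp_avg, exp_avg_sq, fp32) saved as PyTorch files behind Git LFS. As a rough sketch only (not part of the upload, and assuming torch is installed and the LFS objects have actually been pulled), one shard could be inspected like this:

    import torch

    # Illustrative only: load one optimizer-state shard from the listing above.
    # The file must be the real LFS payload, not the three-line pointer stub.
    state = torch.load(
        "ckpts/universal/global_step40/zero/9.mlp.dense_4h_to_h.weight/exp_avg.pt",
        map_location="cpu",
    )
    print(type(state), getattr(state, "shape", None), getattr(state, "dtype", None))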
ckpts/universal/global_step40/zero/23.mlp.dense_h_to_4h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:adc9fd2c4ac012d918d5483254176cf5677f7acb9aca04eb0a5de48d3ab81e44
+size 33555612
ckpts/universal/global_step40/zero/6.input_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:558d00972395a1abdaeb05804ebc2b14e149d87aafc00ec8771ae9024c7fcb88
+size 9387
ckpts/universal/global_step40/zero/9.mlp.dense_4h_to_h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bccb633058b03095e3bb5a395f9958a3b40a21d9774b48de31afb6b9159f9b1
+size 33555612
ckpts/universal/global_step40/zero/9.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d18144c169fc747fafedab07869308e97394c7193ea3dae1496d21613d800604
+size 33555627
ckpts/universal/global_step40/zero/9.mlp.dense_4h_to_h.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2caa7a9f2c8a75408fc316af2ebaa594e9f8455d1962917f506ea1ca7d62a29b
+size 33555533
ckpts/universal/global_step40/zero/9.mlp.dense_h_to_4h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cd828b3932e0711a39673d9a740d3d3d05ead7dd822e5057f2ec1dfd46211ea
+size 33555612
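Each checkpoint hunk above adds only a three-line Git LFS pointer (spec version, sha256 object id, payload size), not the tensor data itself. A minimal sketch of reading such a pointer file; the helper name is invented for illustration:

    def read_lfs_pointer(path):
        # Parse a Git LFS pointer file into {'version': ..., 'oid': ..., 'size': int}.
        fields = {}
        with open(path) as fh:
            for line in fh:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        fields["size"] = int(fields["size"])
        return fields

    # read_lfs_pointer(".../23.mlp.dense_h_to_4h.weight/exp_avg.pt") would return
    # {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:adc9...', 'size': 33555612}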
venv/lib/python3.10/site-packages/nltk/test/parse.doctest
ADDED
@@ -0,0 +1,933 @@
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
=========
|
5 |
+
Parsing
|
6 |
+
=========
|
7 |
+
|
8 |
+
Unit tests for the Context Free Grammar class
|
9 |
+
---------------------------------------------
|
10 |
+
|
11 |
+
>>> import pickle
|
12 |
+
>>> import subprocess
|
13 |
+
>>> import sys
|
14 |
+
>>> from nltk import Nonterminal, nonterminals, Production, CFG
|
15 |
+
|
16 |
+
>>> nt1 = Nonterminal('NP')
|
17 |
+
>>> nt2 = Nonterminal('VP')
|
18 |
+
|
19 |
+
>>> nt1.symbol()
|
20 |
+
'NP'
|
21 |
+
|
22 |
+
>>> nt1 == Nonterminal('NP')
|
23 |
+
True
|
24 |
+
|
25 |
+
>>> nt1 == nt2
|
26 |
+
False
|
27 |
+
|
28 |
+
>>> S, NP, VP, PP = nonterminals('S, NP, VP, PP')
|
29 |
+
>>> N, V, P, DT = nonterminals('N, V, P, DT')
|
30 |
+
|
31 |
+
>>> prod1 = Production(S, [NP, VP])
|
32 |
+
>>> prod2 = Production(NP, [DT, NP])
|
33 |
+
|
34 |
+
>>> prod1.lhs()
|
35 |
+
S
|
36 |
+
|
37 |
+
>>> prod1.rhs()
|
38 |
+
(NP, VP)
|
39 |
+
|
40 |
+
>>> prod1 == Production(S, [NP, VP])
|
41 |
+
True
|
42 |
+
|
43 |
+
>>> prod1 == prod2
|
44 |
+
False
|
45 |
+
|
46 |
+
>>> grammar = CFG.fromstring("""
|
47 |
+
... S -> NP VP
|
48 |
+
... PP -> P NP
|
49 |
+
... NP -> 'the' N | N PP | 'the' N PP
|
50 |
+
... VP -> V NP | V PP | V NP PP
|
51 |
+
... N -> 'cat'
|
52 |
+
... N -> 'dog'
|
53 |
+
... N -> 'rug'
|
54 |
+
... V -> 'chased'
|
55 |
+
... V -> 'sat'
|
56 |
+
... P -> 'in'
|
57 |
+
... P -> 'on'
|
58 |
+
... """)
|
59 |
+
|
60 |
+
>>> cmd = """import pickle
|
61 |
+
... from nltk import Production
|
62 |
+
... p = Production('S', ['NP', 'VP'])
|
63 |
+
... print(pickle.dumps(p))
|
64 |
+
... """
|
65 |
+
|
66 |
+
>>> # Start a subprocess to simulate pickling in another process
|
67 |
+
>>> proc = subprocess.run([sys.executable, '-c', cmd], stdout=subprocess.PIPE)
|
68 |
+
>>> p1 = pickle.loads(eval(proc.stdout))
|
69 |
+
>>> p2 = Production('S', ['NP', 'VP'])
|
70 |
+
>>> print(hash(p1) == hash(p2))
|
71 |
+
True
|
72 |
+
|
73 |
+
Unit tests for the rd (Recursive Descent Parser) class
|
74 |
+
------------------------------------------------------
|
75 |
+
|
76 |
+
Create and run a recursive descent parser over both a syntactically ambiguous
|
77 |
+
and unambiguous sentence.
|
78 |
+
|
79 |
+
>>> from nltk.parse import RecursiveDescentParser
|
80 |
+
>>> rd = RecursiveDescentParser(grammar)
|
81 |
+
|
82 |
+
>>> sentence1 = 'the cat chased the dog'.split()
|
83 |
+
>>> sentence2 = 'the cat chased the dog on the rug'.split()
|
84 |
+
|
85 |
+
>>> for t in rd.parse(sentence1):
|
86 |
+
... print(t)
|
87 |
+
(S (NP the (N cat)) (VP (V chased) (NP the (N dog))))
|
88 |
+
|
89 |
+
>>> for t in rd.parse(sentence2):
|
90 |
+
... print(t)
|
91 |
+
(S
|
92 |
+
(NP the (N cat))
|
93 |
+
(VP (V chased) (NP the (N dog) (PP (P on) (NP the (N rug))))))
|
94 |
+
(S
|
95 |
+
(NP the (N cat))
|
96 |
+
(VP (V chased) (NP the (N dog)) (PP (P on) (NP the (N rug)))))
|
97 |
+
|
98 |
+
|
99 |
+
(dolist (expr doctest-font-lock-keywords)
|
100 |
+
(add-to-list 'font-lock-keywords expr))
|
101 |
+
|
102 |
+
font-lock-keywords
|
103 |
+
(add-to-list 'font-lock-keywords
|
104 |
+
(car doctest-font-lock-keywords))
|
105 |
+
|
106 |
+
|
107 |
+
Unit tests for the sr (Shift Reduce Parser) class
|
108 |
+
-------------------------------------------------
|
109 |
+
|
110 |
+
Create and run a shift reduce parser over both a syntactically ambiguous
|
111 |
+
and unambiguous sentence. Note that unlike the recursive descent parser, one
|
112 |
+
and only one parse is ever returned.
|
113 |
+
|
114 |
+
>>> from nltk.parse import ShiftReduceParser
|
115 |
+
>>> sr = ShiftReduceParser(grammar)
|
116 |
+
|
117 |
+
>>> sentence1 = 'the cat chased the dog'.split()
|
118 |
+
>>> sentence2 = 'the cat chased the dog on the rug'.split()
|
119 |
+
|
120 |
+
>>> for t in sr.parse(sentence1):
|
121 |
+
... print(t)
|
122 |
+
(S (NP the (N cat)) (VP (V chased) (NP the (N dog))))
|
123 |
+
|
124 |
+
|
125 |
+
The shift reduce parser uses heuristics to decide what to do when there are
|
126 |
+
multiple possible shift or reduce operations available - for the supplied
|
127 |
+
grammar clearly the wrong operation is selected.
|
128 |
+
|
129 |
+
>>> for t in sr.parse(sentence2):
|
130 |
+
... print(t)
|
131 |
+
|
132 |
+
|
133 |
+
Unit tests for the Chart Parser class
|
134 |
+
-------------------------------------
|
135 |
+
|
136 |
+
We use the demo() function for testing.
|
137 |
+
We must turn off showing of times.
|
138 |
+
|
139 |
+
>>> import nltk
|
140 |
+
|
141 |
+
First we test tracing with a short sentence
|
142 |
+
|
143 |
+
>>> nltk.parse.chart.demo(2, print_times=False, trace=1,
|
144 |
+
... sent='I saw a dog', numparses=1)
|
145 |
+
* Sentence:
|
146 |
+
I saw a dog
|
147 |
+
['I', 'saw', 'a', 'dog']
|
148 |
+
<BLANKLINE>
|
149 |
+
* Strategy: Bottom-up
|
150 |
+
<BLANKLINE>
|
151 |
+
|. I . saw . a . dog .|
|
152 |
+
|[---------] . . .| [0:1] 'I'
|
153 |
+
|. [---------] . .| [1:2] 'saw'
|
154 |
+
|. . [---------] .| [2:3] 'a'
|
155 |
+
|. . . [---------]| [3:4] 'dog'
|
156 |
+
|> . . . .| [0:0] NP -> * 'I'
|
157 |
+
|[---------] . . .| [0:1] NP -> 'I' *
|
158 |
+
|> . . . .| [0:0] S -> * NP VP
|
159 |
+
|> . . . .| [0:0] NP -> * NP PP
|
160 |
+
|[---------> . . .| [0:1] S -> NP * VP
|
161 |
+
|[---------> . . .| [0:1] NP -> NP * PP
|
162 |
+
|. > . . .| [1:1] Verb -> * 'saw'
|
163 |
+
|. [---------] . .| [1:2] Verb -> 'saw' *
|
164 |
+
|. > . . .| [1:1] VP -> * Verb NP
|
165 |
+
|. > . . .| [1:1] VP -> * Verb
|
166 |
+
|. [---------> . .| [1:2] VP -> Verb * NP
|
167 |
+
|. [---------] . .| [1:2] VP -> Verb *
|
168 |
+
|. > . . .| [1:1] VP -> * VP PP
|
169 |
+
|[-------------------] . .| [0:2] S -> NP VP *
|
170 |
+
|. [---------> . .| [1:2] VP -> VP * PP
|
171 |
+
|. . > . .| [2:2] Det -> * 'a'
|
172 |
+
|. . [---------] .| [2:3] Det -> 'a' *
|
173 |
+
|. . > . .| [2:2] NP -> * Det Noun
|
174 |
+
|. . [---------> .| [2:3] NP -> Det * Noun
|
175 |
+
|. . . > .| [3:3] Noun -> * 'dog'
|
176 |
+
|. . . [---------]| [3:4] Noun -> 'dog' *
|
177 |
+
|. . [-------------------]| [2:4] NP -> Det Noun *
|
178 |
+
|. . > . .| [2:2] S -> * NP VP
|
179 |
+
|. . > . .| [2:2] NP -> * NP PP
|
180 |
+
|. [-----------------------------]| [1:4] VP -> Verb NP *
|
181 |
+
|. . [------------------->| [2:4] S -> NP * VP
|
182 |
+
|. . [------------------->| [2:4] NP -> NP * PP
|
183 |
+
|[=======================================]| [0:4] S -> NP VP *
|
184 |
+
|. [----------------------------->| [1:4] VP -> VP * PP
|
185 |
+
Nr edges in chart: 33
|
186 |
+
(S (NP I) (VP (Verb saw) (NP (Det a) (Noun dog))))
|
187 |
+
<BLANKLINE>
|
188 |
+
|
189 |
+
Then we test the different parsing Strategies.
|
190 |
+
Note that the number of edges differ between the strategies.
|
191 |
+
|
192 |
+
Top-down
|
193 |
+
|
194 |
+
>>> nltk.parse.chart.demo(1, print_times=False, trace=0,
|
195 |
+
... sent='I saw John with a dog', numparses=2)
|
196 |
+
* Sentence:
|
197 |
+
I saw John with a dog
|
198 |
+
['I', 'saw', 'John', 'with', 'a', 'dog']
|
199 |
+
<BLANKLINE>
|
200 |
+
* Strategy: Top-down
|
201 |
+
<BLANKLINE>
|
202 |
+
Nr edges in chart: 48
|
203 |
+
(S
|
204 |
+
(NP I)
|
205 |
+
(VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
|
206 |
+
(S
|
207 |
+
(NP I)
|
208 |
+
(VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
|
209 |
+
<BLANKLINE>
|
210 |
+
|
211 |
+
Bottom-up
|
212 |
+
|
213 |
+
>>> nltk.parse.chart.demo(2, print_times=False, trace=0,
|
214 |
+
... sent='I saw John with a dog', numparses=2)
|
215 |
+
* Sentence:
|
216 |
+
I saw John with a dog
|
217 |
+
['I', 'saw', 'John', 'with', 'a', 'dog']
|
218 |
+
<BLANKLINE>
|
219 |
+
* Strategy: Bottom-up
|
220 |
+
<BLANKLINE>
|
221 |
+
Nr edges in chart: 53
|
222 |
+
(S
|
223 |
+
(NP I)
|
224 |
+
(VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
|
225 |
+
(S
|
226 |
+
(NP I)
|
227 |
+
(VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
|
228 |
+
<BLANKLINE>
|
229 |
+
|
230 |
+
Bottom-up Left-Corner
|
231 |
+
|
232 |
+
>>> nltk.parse.chart.demo(3, print_times=False, trace=0,
|
233 |
+
... sent='I saw John with a dog', numparses=2)
|
234 |
+
* Sentence:
|
235 |
+
I saw John with a dog
|
236 |
+
['I', 'saw', 'John', 'with', 'a', 'dog']
|
237 |
+
<BLANKLINE>
|
238 |
+
* Strategy: Bottom-up left-corner
|
239 |
+
<BLANKLINE>
|
240 |
+
Nr edges in chart: 36
|
241 |
+
(S
|
242 |
+
(NP I)
|
243 |
+
(VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
|
244 |
+
(S
|
245 |
+
(NP I)
|
246 |
+
(VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
|
247 |
+
<BLANKLINE>
|
248 |
+
|
249 |
+
Left-Corner with Bottom-Up Filter
|
250 |
+
|
251 |
+
>>> nltk.parse.chart.demo(4, print_times=False, trace=0,
|
252 |
+
... sent='I saw John with a dog', numparses=2)
|
253 |
+
* Sentence:
|
254 |
+
I saw John with a dog
|
255 |
+
['I', 'saw', 'John', 'with', 'a', 'dog']
|
256 |
+
<BLANKLINE>
|
257 |
+
* Strategy: Filtered left-corner
|
258 |
+
<BLANKLINE>
|
259 |
+
Nr edges in chart: 28
|
260 |
+
(S
|
261 |
+
(NP I)
|
262 |
+
(VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
|
263 |
+
(S
|
264 |
+
(NP I)
|
265 |
+
(VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
|
266 |
+
<BLANKLINE>
|
267 |
+
|
268 |
+
The stepping chart parser
|
269 |
+
|
270 |
+
>>> nltk.parse.chart.demo(5, print_times=False, trace=1,
|
271 |
+
... sent='I saw John with a dog', numparses=2)
|
272 |
+
* Sentence:
|
273 |
+
I saw John with a dog
|
274 |
+
['I', 'saw', 'John', 'with', 'a', 'dog']
|
275 |
+
<BLANKLINE>
|
276 |
+
* Strategy: Stepping (top-down vs bottom-up)
|
277 |
+
<BLANKLINE>
|
278 |
+
*** SWITCH TO TOP DOWN
|
279 |
+
|[------] . . . . .| [0:1] 'I'
|
280 |
+
|. [------] . . . .| [1:2] 'saw'
|
281 |
+
|. . [------] . . .| [2:3] 'John'
|
282 |
+
|. . . [------] . .| [3:4] 'with'
|
283 |
+
|. . . . [------] .| [4:5] 'a'
|
284 |
+
|. . . . . [------]| [5:6] 'dog'
|
285 |
+
|> . . . . . .| [0:0] S -> * NP VP
|
286 |
+
|> . . . . . .| [0:0] NP -> * NP PP
|
287 |
+
|> . . . . . .| [0:0] NP -> * Det Noun
|
288 |
+
|> . . . . . .| [0:0] NP -> * 'I'
|
289 |
+
|[------] . . . . .| [0:1] NP -> 'I' *
|
290 |
+
|[------> . . . . .| [0:1] S -> NP * VP
|
291 |
+
|[------> . . . . .| [0:1] NP -> NP * PP
|
292 |
+
|. > . . . . .| [1:1] VP -> * VP PP
|
293 |
+
|. > . . . . .| [1:1] VP -> * Verb NP
|
294 |
+
|. > . . . . .| [1:1] VP -> * Verb
|
295 |
+
|. > . . . . .| [1:1] Verb -> * 'saw'
|
296 |
+
|. [------] . . . .| [1:2] Verb -> 'saw' *
|
297 |
+
|. [------> . . . .| [1:2] VP -> Verb * NP
|
298 |
+
|. [------] . . . .| [1:2] VP -> Verb *
|
299 |
+
|[-------------] . . . .| [0:2] S -> NP VP *
|
300 |
+
|. [------> . . . .| [1:2] VP -> VP * PP
|
301 |
+
*** SWITCH TO BOTTOM UP
|
302 |
+
|. . > . . . .| [2:2] NP -> * 'John'
|
303 |
+
|. . . > . . .| [3:3] PP -> * 'with' NP
|
304 |
+
|. . . > . . .| [3:3] Prep -> * 'with'
|
305 |
+
|. . . . > . .| [4:4] Det -> * 'a'
|
306 |
+
|. . . . . > .| [5:5] Noun -> * 'dog'
|
307 |
+
|. . [------] . . .| [2:3] NP -> 'John' *
|
308 |
+
|. . . [------> . .| [3:4] PP -> 'with' * NP
|
309 |
+
|. . . [------] . .| [3:4] Prep -> 'with' *
|
310 |
+
|. . . . [------] .| [4:5] Det -> 'a' *
|
311 |
+
|. . . . . [------]| [5:6] Noun -> 'dog' *
|
312 |
+
|. [-------------] . . .| [1:3] VP -> Verb NP *
|
313 |
+
|[--------------------] . . .| [0:3] S -> NP VP *
|
314 |
+
|. [-------------> . . .| [1:3] VP -> VP * PP
|
315 |
+
|. . > . . . .| [2:2] S -> * NP VP
|
316 |
+
|. . > . . . .| [2:2] NP -> * NP PP
|
317 |
+
|. . . . > . .| [4:4] NP -> * Det Noun
|
318 |
+
|. . [------> . . .| [2:3] S -> NP * VP
|
319 |
+
|. . [------> . . .| [2:3] NP -> NP * PP
|
320 |
+
|. . . . [------> .| [4:5] NP -> Det * Noun
|
321 |
+
|. . . . [-------------]| [4:6] NP -> Det Noun *
|
322 |
+
|. . . [--------------------]| [3:6] PP -> 'with' NP *
|
323 |
+
|. [----------------------------------]| [1:6] VP -> VP PP *
|
324 |
+
*** SWITCH TO TOP DOWN
|
325 |
+
|. . > . . . .| [2:2] NP -> * Det Noun
|
326 |
+
|. . . . > . .| [4:4] NP -> * NP PP
|
327 |
+
|. . . > . . .| [3:3] VP -> * VP PP
|
328 |
+
|. . . > . . .| [3:3] VP -> * Verb NP
|
329 |
+
|. . . > . . .| [3:3] VP -> * Verb
|
330 |
+
|[=========================================]| [0:6] S -> NP VP *
|
331 |
+
|. [---------------------------------->| [1:6] VP -> VP * PP
|
332 |
+
|. . [---------------------------]| [2:6] NP -> NP PP *
|
333 |
+
|. . . . [------------->| [4:6] NP -> NP * PP
|
334 |
+
|. [----------------------------------]| [1:6] VP -> Verb NP *
|
335 |
+
|. . [--------------------------->| [2:6] S -> NP * VP
|
336 |
+
|. . [--------------------------->| [2:6] NP -> NP * PP
|
337 |
+
|[=========================================]| [0:6] S -> NP VP *
|
338 |
+
|. [---------------------------------->| [1:6] VP -> VP * PP
|
339 |
+
|. . . . . . >| [6:6] VP -> * VP PP
|
340 |
+
|. . . . . . >| [6:6] VP -> * Verb NP
|
341 |
+
|. . . . . . >| [6:6] VP -> * Verb
|
342 |
+
*** SWITCH TO BOTTOM UP
|
343 |
+
|. . . . > . .| [4:4] S -> * NP VP
|
344 |
+
|. . . . [------------->| [4:6] S -> NP * VP
|
345 |
+
*** SWITCH TO TOP DOWN
|
346 |
+
*** SWITCH TO BOTTOM UP
|
347 |
+
*** SWITCH TO TOP DOWN
|
348 |
+
*** SWITCH TO BOTTOM UP
|
349 |
+
*** SWITCH TO TOP DOWN
|
350 |
+
*** SWITCH TO BOTTOM UP
|
351 |
+
Nr edges in chart: 61
|
352 |
+
(S
|
353 |
+
(NP I)
|
354 |
+
(VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
|
355 |
+
(S
|
356 |
+
(NP I)
|
357 |
+
(VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
|
358 |
+
<BLANKLINE>
|
359 |
+
|
360 |
+
|
361 |
+
Unit tests for the Incremental Chart Parser class
|
362 |
+
-------------------------------------------------
|
363 |
+
|
364 |
+
The incremental chart parsers are defined in earleychart.py.
|
365 |
+
We use the demo() function for testing. We must turn off showing of times.
|
366 |
+
|
367 |
+
>>> import nltk
|
368 |
+
|
369 |
+
Earley Chart Parser
|
370 |
+
|
371 |
+
>>> nltk.parse.earleychart.demo(print_times=False, trace=1,
|
372 |
+
... sent='I saw John with a dog', numparses=2)
|
373 |
+
* Sentence:
|
374 |
+
I saw John with a dog
|
375 |
+
['I', 'saw', 'John', 'with', 'a', 'dog']
|
376 |
+
<BLANKLINE>
|
377 |
+
|. I . saw . John . with . a . dog .|
|
378 |
+
|[------] . . . . .| [0:1] 'I'
|
379 |
+
|. [------] . . . .| [1:2] 'saw'
|
380 |
+
|. . [------] . . .| [2:3] 'John'
|
381 |
+
|. . . [------] . .| [3:4] 'with'
|
382 |
+
|. . . . [------] .| [4:5] 'a'
|
383 |
+
|. . . . . [------]| [5:6] 'dog'
|
384 |
+
|> . . . . . .| [0:0] S -> * NP VP
|
385 |
+
|> . . . . . .| [0:0] NP -> * NP PP
|
386 |
+
|> . . . . . .| [0:0] NP -> * Det Noun
|
387 |
+
|> . . . . . .| [0:0] NP -> * 'I'
|
388 |
+
|[------] . . . . .| [0:1] NP -> 'I' *
|
389 |
+
|[------> . . . . .| [0:1] S -> NP * VP
|
390 |
+
|[------> . . . . .| [0:1] NP -> NP * PP
|
391 |
+
|. > . . . . .| [1:1] VP -> * VP PP
|
392 |
+
|. > . . . . .| [1:1] VP -> * Verb NP
|
393 |
+
|. > . . . . .| [1:1] VP -> * Verb
|
394 |
+
|. > . . . . .| [1:1] Verb -> * 'saw'
|
395 |
+
|. [------] . . . .| [1:2] Verb -> 'saw' *
|
396 |
+
|. [------> . . . .| [1:2] VP -> Verb * NP
|
397 |
+
|. [------] . . . .| [1:2] VP -> Verb *
|
398 |
+
|[-------------] . . . .| [0:2] S -> NP VP *
|
399 |
+
|. [------> . . . .| [1:2] VP -> VP * PP
|
400 |
+
|. . > . . . .| [2:2] NP -> * NP PP
|
401 |
+
|. . > . . . .| [2:2] NP -> * Det Noun
|
402 |
+
|. . > . . . .| [2:2] NP -> * 'John'
|
403 |
+
|. . [------] . . .| [2:3] NP -> 'John' *
|
404 |
+
|. [-------------] . . .| [1:3] VP -> Verb NP *
|
405 |
+
|. . [------> . . .| [2:3] NP -> NP * PP
|
406 |
+
|. . . > . . .| [3:3] PP -> * 'with' NP
|
407 |
+
|[--------------------] . . .| [0:3] S -> NP VP *
|
408 |
+
|. [-------------> . . .| [1:3] VP -> VP * PP
|
409 |
+
|. . . [------> . .| [3:4] PP -> 'with' * NP
|
410 |
+
|. . . . > . .| [4:4] NP -> * NP PP
|
411 |
+
|. . . . > . .| [4:4] NP -> * Det Noun
|
412 |
+
|. . . . > . .| [4:4] Det -> * 'a'
|
413 |
+
|. . . . [------] .| [4:5] Det -> 'a' *
|
414 |
+
|. . . . [------> .| [4:5] NP -> Det * Noun
|
415 |
+
|. . . . . > .| [5:5] Noun -> * 'dog'
|
416 |
+
|. . . . . [------]| [5:6] Noun -> 'dog' *
|
417 |
+
|. . . . [-------------]| [4:6] NP -> Det Noun *
|
418 |
+
|. . . [--------------------]| [3:6] PP -> 'with' NP *
|
419 |
+
|. . . . [------------->| [4:6] NP -> NP * PP
|
420 |
+
|. . [---------------------------]| [2:6] NP -> NP PP *
|
421 |
+
|. [----------------------------------]| [1:6] VP -> VP PP *
|
422 |
+
|[=========================================]| [0:6] S -> NP VP *
|
423 |
+
|. [---------------------------------->| [1:6] VP -> VP * PP
|
424 |
+
|. [----------------------------------]| [1:6] VP -> Verb NP *
|
425 |
+
|. . [--------------------------->| [2:6] NP -> NP * PP
|
426 |
+
|[=========================================]| [0:6] S -> NP VP *
|
427 |
+
|. [---------------------------------->| [1:6] VP -> VP * PP
|
428 |
+
(S
|
429 |
+
(NP I)
|
430 |
+
(VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
|
431 |
+
(S
|
432 |
+
(NP I)
|
433 |
+
(VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
|
434 |
+
|
435 |
+
|
436 |
+
Unit tests for LARGE context-free grammars
|
437 |
+
------------------------------------------
|
438 |
+
|
439 |
+
Reading the ATIS grammar.
|
440 |
+
|
441 |
+
>>> grammar = nltk.data.load('grammars/large_grammars/atis.cfg')
|
442 |
+
>>> grammar
|
443 |
+
<Grammar with 5517 productions>
|
444 |
+
|
445 |
+
Reading the test sentences.
|
446 |
+
|
447 |
+
>>> sentences = nltk.data.load('grammars/large_grammars/atis_sentences.txt')
|
448 |
+
>>> sentences = nltk.parse.util.extract_test_sentences(sentences)
|
449 |
+
>>> len(sentences)
|
450 |
+
98
|
451 |
+
>>> testsentence = sentences[22]
|
452 |
+
>>> testsentence[0]
|
453 |
+
['show', 'me', 'northwest', 'flights', 'to', 'detroit', '.']
|
454 |
+
>>> testsentence[1]
|
455 |
+
17
|
456 |
+
>>> sentence = testsentence[0]
|
457 |
+
|
458 |
+
Now we test all different parsing strategies.
|
459 |
+
Note that the number of edges differ between the strategies.
|
460 |
+
|
461 |
+
Bottom-up parsing.
|
462 |
+
|
463 |
+
>>> parser = nltk.parse.BottomUpChartParser(grammar)
|
464 |
+
>>> chart = parser.chart_parse(sentence)
|
465 |
+
>>> print((chart.num_edges()))
|
466 |
+
7661
|
467 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
468 |
+
17
|
469 |
+
|
470 |
+
Bottom-up Left-corner parsing.
|
471 |
+
|
472 |
+
>>> parser = nltk.parse.BottomUpLeftCornerChartParser(grammar)
|
473 |
+
>>> chart = parser.chart_parse(sentence)
|
474 |
+
>>> print((chart.num_edges()))
|
475 |
+
4986
|
476 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
477 |
+
17
|
478 |
+
|
479 |
+
Left-corner parsing with bottom-up filter.
|
480 |
+
|
481 |
+
>>> parser = nltk.parse.LeftCornerChartParser(grammar)
|
482 |
+
>>> chart = parser.chart_parse(sentence)
|
483 |
+
>>> print((chart.num_edges()))
|
484 |
+
1342
|
485 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
486 |
+
17
|
487 |
+
|
488 |
+
Top-down parsing.
|
489 |
+
|
490 |
+
>>> parser = nltk.parse.TopDownChartParser(grammar)
|
491 |
+
>>> chart = parser.chart_parse(sentence)
|
492 |
+
>>> print((chart.num_edges()))
|
493 |
+
28352
|
494 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
495 |
+
17
|
496 |
+
|
497 |
+
Incremental Bottom-up parsing.
|
498 |
+
|
499 |
+
>>> parser = nltk.parse.IncrementalBottomUpChartParser(grammar)
|
500 |
+
>>> chart = parser.chart_parse(sentence)
|
501 |
+
>>> print((chart.num_edges()))
|
502 |
+
7661
|
503 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
504 |
+
17
|
505 |
+
|
506 |
+
Incremental Bottom-up Left-corner parsing.
|
507 |
+
|
508 |
+
>>> parser = nltk.parse.IncrementalBottomUpLeftCornerChartParser(grammar)
|
509 |
+
>>> chart = parser.chart_parse(sentence)
|
510 |
+
>>> print((chart.num_edges()))
|
511 |
+
4986
|
512 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
513 |
+
17
|
514 |
+
|
515 |
+
Incremental Left-corner parsing with bottom-up filter.
|
516 |
+
|
517 |
+
>>> parser = nltk.parse.IncrementalLeftCornerChartParser(grammar)
|
518 |
+
>>> chart = parser.chart_parse(sentence)
|
519 |
+
>>> print((chart.num_edges()))
|
520 |
+
1342
|
521 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
522 |
+
17
|
523 |
+
|
524 |
+
Incremental Top-down parsing.
|
525 |
+
|
526 |
+
>>> parser = nltk.parse.IncrementalTopDownChartParser(grammar)
|
527 |
+
>>> chart = parser.chart_parse(sentence)
|
528 |
+
>>> print((chart.num_edges()))
|
529 |
+
28352
|
530 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
531 |
+
17
|
532 |
+
|
533 |
+
Earley parsing. This is similar to the incremental top-down algorithm.
|
534 |
+
|
535 |
+
>>> parser = nltk.parse.EarleyChartParser(grammar)
|
536 |
+
>>> chart = parser.chart_parse(sentence)
|
537 |
+
>>> print((chart.num_edges()))
|
538 |
+
28352
|
539 |
+
>>> print((len(list(chart.parses(grammar.start())))))
|
540 |
+
17
|
541 |
+
|
542 |
+
|
543 |
+
Unit tests for the Probabilistic CFG class
|
544 |
+
------------------------------------------
|
545 |
+
|
546 |
+
>>> from nltk.corpus import treebank
|
547 |
+
>>> from itertools import islice
|
548 |
+
>>> from nltk.grammar import PCFG, induce_pcfg
|
549 |
+
>>> toy_pcfg1 = PCFG.fromstring("""
|
550 |
+
... S -> NP VP [1.0]
|
551 |
+
... NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
|
552 |
+
... Det -> 'the' [0.8] | 'my' [0.2]
|
553 |
+
... N -> 'man' [0.5] | 'telescope' [0.5]
|
554 |
+
... VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
|
555 |
+
... V -> 'ate' [0.35] | 'saw' [0.65]
|
556 |
+
... PP -> P NP [1.0]
|
557 |
+
... P -> 'with' [0.61] | 'under' [0.39]
|
558 |
+
... """)
|
559 |
+
|
560 |
+
>>> toy_pcfg2 = PCFG.fromstring("""
|
561 |
+
... S -> NP VP [1.0]
|
562 |
+
... VP -> V NP [.59]
|
563 |
+
... VP -> V [.40]
|
564 |
+
... VP -> VP PP [.01]
|
565 |
+
... NP -> Det N [.41]
|
566 |
+
... NP -> Name [.28]
|
567 |
+
... NP -> NP PP [.31]
|
568 |
+
... PP -> P NP [1.0]
|
569 |
+
... V -> 'saw' [.21]
|
570 |
+
... V -> 'ate' [.51]
|
571 |
+
... V -> 'ran' [.28]
|
572 |
+
... N -> 'boy' [.11]
|
573 |
+
... N -> 'cookie' [.12]
|
574 |
+
... N -> 'table' [.13]
|
575 |
+
... N -> 'telescope' [.14]
|
576 |
+
... N -> 'hill' [.5]
|
577 |
+
... Name -> 'Jack' [.52]
|
578 |
+
... Name -> 'Bob' [.48]
|
579 |
+
... P -> 'with' [.61]
|
580 |
+
... P -> 'under' [.39]
|
581 |
+
... Det -> 'the' [.41]
|
582 |
+
... Det -> 'a' [.31]
|
583 |
+
... Det -> 'my' [.28]
|
584 |
+
... """)
|
585 |
+
|
586 |
+
Create a set of PCFG productions.
|
587 |
+
|
588 |
+
>>> grammar = PCFG.fromstring("""
|
589 |
+
... A -> B B [.3] | C B C [.7]
|
590 |
+
... B -> B D [.5] | C [.5]
|
591 |
+
... C -> 'a' [.1] | 'b' [0.9]
|
592 |
+
... D -> 'b' [1.0]
|
593 |
+
... """)
|
594 |
+
>>> prod = grammar.productions()[0]
|
595 |
+
>>> prod
|
596 |
+
A -> B B [0.3]
|
597 |
+
|
598 |
+
>>> prod.lhs()
|
599 |
+
A
|
600 |
+
|
601 |
+
>>> prod.rhs()
|
602 |
+
(B, B)
|
603 |
+
|
604 |
+
>>> print((prod.prob()))
|
605 |
+
0.3
|
606 |
+
|
607 |
+
>>> grammar.start()
|
608 |
+
A
|
609 |
+
|
610 |
+
>>> grammar.productions()
|
611 |
+
[A -> B B [0.3], A -> C B C [0.7], B -> B D [0.5], B -> C [0.5], C -> 'a' [0.1], C -> 'b' [0.9], D -> 'b' [1.0]]
|
612 |
+
|
613 |
+
Induce some productions using parsed Treebank data.
|
614 |
+
|
615 |
+
>>> productions = []
|
616 |
+
>>> for fileid in treebank.fileids()[:2]:
|
617 |
+
... for t in treebank.parsed_sents(fileid):
|
618 |
+
... productions += t.productions()
|
619 |
+
|
620 |
+
>>> grammar = induce_pcfg(S, productions)
|
621 |
+
>>> grammar
|
622 |
+
<Grammar with 71 productions>
|
623 |
+
|
624 |
+
>>> sorted(grammar.productions(lhs=Nonterminal('PP')))[:2]
|
625 |
+
[PP -> IN NP [1.0]]
|
626 |
+
>>> sorted(grammar.productions(lhs=Nonterminal('NNP')))[:2]
|
627 |
+
[NNP -> 'Agnew' [0.0714286], NNP -> 'Consolidated' [0.0714286]]
|
628 |
+
>>> sorted(grammar.productions(lhs=Nonterminal('JJ')))[:2]
|
629 |
+
[JJ -> 'British' [0.142857], JJ -> 'former' [0.142857]]
|
630 |
+
>>> sorted(grammar.productions(lhs=Nonterminal('NP')))[:2]
|
631 |
+
[NP -> CD NNS [0.133333], NP -> DT JJ JJ NN [0.0666667]]
|
632 |
+
|
633 |
+
Unit tests for the Probabilistic Chart Parse classes
|
634 |
+
----------------------------------------------------
|
635 |
+
|
636 |
+
>>> tokens = "Jack saw Bob with my cookie".split()
|
637 |
+
>>> grammar = toy_pcfg2
|
638 |
+
>>> print(grammar)
|
639 |
+
Grammar with 23 productions (start state = S)
|
640 |
+
S -> NP VP [1.0]
|
641 |
+
VP -> V NP [0.59]
|
642 |
+
VP -> V [0.4]
|
643 |
+
VP -> VP PP [0.01]
|
644 |
+
NP -> Det N [0.41]
|
645 |
+
NP -> Name [0.28]
|
646 |
+
NP -> NP PP [0.31]
|
647 |
+
PP -> P NP [1.0]
|
648 |
+
V -> 'saw' [0.21]
|
649 |
+
V -> 'ate' [0.51]
|
650 |
+
V -> 'ran' [0.28]
|
651 |
+
N -> 'boy' [0.11]
|
652 |
+
N -> 'cookie' [0.12]
|
653 |
+
N -> 'table' [0.13]
|
654 |
+
N -> 'telescope' [0.14]
|
655 |
+
N -> 'hill' [0.5]
|
656 |
+
Name -> 'Jack' [0.52]
|
657 |
+
Name -> 'Bob' [0.48]
|
658 |
+
P -> 'with' [0.61]
|
659 |
+
P -> 'under' [0.39]
|
660 |
+
Det -> 'the' [0.41]
|
661 |
+
Det -> 'a' [0.31]
|
662 |
+
Det -> 'my' [0.28]
|
663 |
+
|
664 |
+
Create several parsers using different queuing strategies and show the
|
665 |
+
resulting parses.
|
666 |
+
|
667 |
+
>>> from nltk.parse import pchart
|
668 |
+
|
669 |
+
>>> parser = pchart.InsideChartParser(grammar)
|
670 |
+
>>> for t in parser.parse(tokens):
|
671 |
+
... print(t)
|
672 |
+
(S
|
673 |
+
(NP (Name Jack))
|
674 |
+
(VP
|
675 |
+
(V saw)
|
676 |
+
(NP
|
677 |
+
(NP (Name Bob))
|
678 |
+
(PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06)
|
679 |
+
(S
|
680 |
+
(NP (Name Jack))
|
681 |
+
(VP
|
682 |
+
(VP (V saw) (NP (Name Bob)))
|
683 |
+
(PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07)
|
684 |
+
|
685 |
+
>>> parser = pchart.RandomChartParser(grammar)
|
686 |
+
>>> for t in parser.parse(tokens):
|
687 |
+
... print(t)
|
688 |
+
(S
|
689 |
+
(NP (Name Jack))
|
690 |
+
(VP
|
691 |
+
(V saw)
|
692 |
+
(NP
|
693 |
+
(NP (Name Bob))
|
694 |
+
(PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06)
|
695 |
+
(S
|
696 |
+
(NP (Name Jack))
|
697 |
+
(VP
|
698 |
+
(VP (V saw) (NP (Name Bob)))
|
699 |
+
(PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07)
|
700 |
+
|
701 |
+
>>> parser = pchart.UnsortedChartParser(grammar)
|
702 |
+
>>> for t in parser.parse(tokens):
|
703 |
+
... print(t)
|
704 |
+
(S
|
705 |
+
(NP (Name Jack))
|
706 |
+
(VP
|
707 |
+
(V saw)
|
708 |
+
(NP
|
709 |
+
(NP (Name Bob))
|
710 |
+
(PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06)
|
711 |
+
(S
|
712 |
+
(NP (Name Jack))
|
713 |
+
(VP
|
714 |
+
(VP (V saw) (NP (Name Bob)))
|
715 |
+
(PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07)
|
716 |
+
|
717 |
+
>>> parser = pchart.LongestChartParser(grammar)
|
718 |
+
>>> for t in parser.parse(tokens):
|
719 |
+
... print(t)
|
720 |
+
(S
|
721 |
+
(NP (Name Jack))
|
722 |
+
(VP
|
723 |
+
(V saw)
|
724 |
+
(NP
|
725 |
+
(NP (Name Bob))
|
726 |
+
(PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06)
|
727 |
+
(S
|
728 |
+
(NP (Name Jack))
|
729 |
+
(VP
|
730 |
+
(VP (V saw) (NP (Name Bob)))
|
731 |
+
(PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07)
|
732 |
+
|
733 |
+
>>> parser = pchart.InsideChartParser(grammar, beam_size = len(tokens)+1)
|
734 |
+
>>> for t in parser.parse(tokens):
|
735 |
+
... print(t)
|
736 |
+
|
737 |
+
|
738 |
+
Unit tests for the Viterbi Parse classes
|
739 |
+
----------------------------------------
|
740 |
+
|
741 |
+
>>> from nltk.parse import ViterbiParser
|
742 |
+
>>> tokens = "Jack saw Bob with my cookie".split()
|
743 |
+
>>> grammar = toy_pcfg2
|
744 |
+
|
745 |
+
Parse the tokenized sentence.
|
746 |
+
|
747 |
+
>>> parser = ViterbiParser(grammar)
|
748 |
+
>>> for t in parser.parse(tokens):
|
749 |
+
... print(t)
|
750 |
+
(S
|
751 |
+
(NP (Name Jack))
|
752 |
+
(VP
|
753 |
+
(V saw)
|
754 |
+
(NP
|
755 |
+
(NP (Name Bob))
|
756 |
+
(PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06)
|
757 |
+
|
758 |
+
|
759 |
+
Unit tests for the FeatStructNonterminal class
|
760 |
+
----------------------------------------------
|
761 |
+
|
762 |
+
>>> from nltk.grammar import FeatStructNonterminal
|
763 |
+
>>> FeatStructNonterminal(
|
764 |
+
... pos='n', agr=FeatStructNonterminal(number='pl', gender='f'))
|
765 |
+
[agr=[gender='f', number='pl'], pos='n']
|
766 |
+
|
767 |
+
>>> FeatStructNonterminal('VP[+fin]/NP[+pl]')
|
768 |
+
VP[+fin]/NP[+pl]
|
769 |
+
|
770 |
+
|
771 |
+
Tracing the Feature Chart Parser
|
772 |
+
--------------------------------
|
773 |
+
|
774 |
+
We use the featurechart.demo() function for tracing the Feature Chart Parser.
|
775 |
+
|
776 |
+
>>> nltk.parse.featurechart.demo(print_times=False,
|
777 |
+
... print_grammar=True,
|
778 |
+
... parser=nltk.parse.featurechart.FeatureChartParser,
|
779 |
+
... sent='I saw John with a dog')
|
780 |
+
<BLANKLINE>
|
781 |
+
Grammar with 18 productions (start state = S[])
|
782 |
+
S[] -> NP[] VP[]
|
783 |
+
PP[] -> Prep[] NP[]
|
784 |
+
NP[] -> NP[] PP[]
|
785 |
+
VP[] -> VP[] PP[]
|
786 |
+
VP[] -> Verb[] NP[]
|
787 |
+
VP[] -> Verb[]
|
788 |
+
NP[] -> Det[pl=?x] Noun[pl=?x]
|
789 |
+
NP[] -> 'John'
|
790 |
+
NP[] -> 'I'
|
791 |
+
Det[] -> 'the'
|
792 |
+
Det[] -> 'my'
|
793 |
+
Det[-pl] -> 'a'
|
794 |
+
Noun[-pl] -> 'dog'
|
795 |
+
Noun[-pl] -> 'cookie'
|
796 |
+
Verb[] -> 'ate'
|
797 |
+
Verb[] -> 'saw'
|
798 |
+
Prep[] -> 'with'
|
799 |
+
Prep[] -> 'under'
|
800 |
+
<BLANKLINE>
|
801 |
+
* FeatureChartParser
|
802 |
+
Sentence: I saw John with a dog
|
803 |
+
|.I.s.J.w.a.d.|
|
804 |
+
|[-] . . . . .| [0:1] 'I'
|
805 |
+
|. [-] . . . .| [1:2] 'saw'
|
806 |
+
|. . [-] . . .| [2:3] 'John'
|
807 |
+
|. . . [-] . .| [3:4] 'with'
|
808 |
+
|. . . . [-] .| [4:5] 'a'
|
809 |
+
|. . . . . [-]| [5:6] 'dog'
|
810 |
+
|[-] . . . . .| [0:1] NP[] -> 'I' *
|
811 |
+
|[-> . . . . .| [0:1] S[] -> NP[] * VP[] {}
|
812 |
+
|[-> . . . . .| [0:1] NP[] -> NP[] * PP[] {}
|
813 |
+
|. [-] . . . .| [1:2] Verb[] -> 'saw' *
|
814 |
+
|. [-> . . . .| [1:2] VP[] -> Verb[] * NP[] {}
|
815 |
+
|. [-] . . . .| [1:2] VP[] -> Verb[] *
|
816 |
+
|. [-> . . . .| [1:2] VP[] -> VP[] * PP[] {}
|
817 |
+
|[---] . . . .| [0:2] S[] -> NP[] VP[] *
|
818 |
+
|. . [-] . . .| [2:3] NP[] -> 'John' *
|
819 |
+
|. . [-> . . .| [2:3] S[] -> NP[] * VP[] {}
|
820 |
+
|. . [-> . . .| [2:3] NP[] -> NP[] * PP[] {}
|
821 |
+
|. [---] . . .| [1:3] VP[] -> Verb[] NP[] *
|
822 |
+
|. [---> . . .| [1:3] VP[] -> VP[] * PP[] {}
|
823 |
+
|[-----] . . .| [0:3] S[] -> NP[] VP[] *
|
824 |
+
|. . . [-] . .| [3:4] Prep[] -> 'with' *
|
825 |
+
|. . . [-> . .| [3:4] PP[] -> Prep[] * NP[] {}
|
826 |
+
|. . . . [-] .| [4:5] Det[-pl] -> 'a' *
|
827 |
+
|. . . . [-> .| [4:5] NP[] -> Det[pl=?x] * Noun[pl=?x] {?x: False}
|
828 |
+
|. . . . . [-]| [5:6] Noun[-pl] -> 'dog' *
|
829 |
+
|. . . . [---]| [4:6] NP[] -> Det[-pl] Noun[-pl] *
|
830 |
+
|. . . . [--->| [4:6] S[] -> NP[] * VP[] {}
|
831 |
+
|. . . . [--->| [4:6] NP[] -> NP[] * PP[] {}
|
832 |
+
|. . . [-----]| [3:6] PP[] -> Prep[] NP[] *
|
833 |
+
|. . [-------]| [2:6] NP[] -> NP[] PP[] *
|
834 |
+
|. [---------]| [1:6] VP[] -> VP[] PP[] *
|
835 |
+
|. [--------->| [1:6] VP[] -> VP[] * PP[] {}
|
836 |
+
|[===========]| [0:6] S[] -> NP[] VP[] *
|
837 |
+
|. . [------->| [2:6] S[] -> NP[] * VP[] {}
|
838 |
+
|. . [------->| [2:6] NP[] -> NP[] * PP[] {}
|
839 |
+
|. [---------]| [1:6] VP[] -> Verb[] NP[] *
|
840 |
+
|. [--------->| [1:6] VP[] -> VP[] * PP[] {}
|
841 |
+
|[===========]| [0:6] S[] -> NP[] VP[] *
|
842 |
+
(S[]
|
843 |
+
(NP[] I)
|
844 |
+
(VP[]
|
845 |
+
(VP[] (Verb[] saw) (NP[] John))
|
846 |
+
(PP[] (Prep[] with) (NP[] (Det[-pl] a) (Noun[-pl] dog)))))
|
847 |
+
(S[]
|
848 |
+
(NP[] I)
|
849 |
+
(VP[]
|
850 |
+
(Verb[] saw)
|
851 |
+
(NP[]
|
852 |
+
(NP[] John)
|
853 |
+
(PP[] (Prep[] with) (NP[] (Det[-pl] a) (Noun[-pl] dog))))))
|
854 |
+
|
855 |
+
|
856 |
+
Unit tests for the Feature Chart Parser classes
|
857 |
+
-----------------------------------------------
|
858 |
+
|
859 |
+
The list of parsers we want to test.
|
860 |
+
|
861 |
+
>>> parsers = [nltk.parse.featurechart.FeatureChartParser,
|
862 |
+
... nltk.parse.featurechart.FeatureTopDownChartParser,
|
863 |
+
... nltk.parse.featurechart.FeatureBottomUpChartParser,
|
864 |
+
... nltk.parse.featurechart.FeatureBottomUpLeftCornerChartParser,
|
865 |
+
... nltk.parse.earleychart.FeatureIncrementalChartParser,
|
866 |
+
... nltk.parse.earleychart.FeatureEarleyChartParser,
|
867 |
+
... nltk.parse.earleychart.FeatureIncrementalTopDownChartParser,
|
868 |
+
... nltk.parse.earleychart.FeatureIncrementalBottomUpChartParser,
|
869 |
+
... nltk.parse.earleychart.FeatureIncrementalBottomUpLeftCornerChartParser,
|
870 |
+
... ]
|
871 |
+
|
872 |
+
A helper function that tests each parser on the given grammar and sentence.
|
873 |
+
We check that the number of trees are correct, and that all parsers
|
874 |
+
return the same trees. Otherwise an error is printed.
|
875 |
+
|
876 |
+
>>> def unittest(grammar, sentence, nr_trees):
|
877 |
+
... sentence = sentence.split()
|
878 |
+
... trees = None
|
879 |
+
... for P in parsers:
|
880 |
+
... result = P(grammar).parse(sentence)
|
881 |
+
... result = set(tree.freeze() for tree in result)
|
882 |
+
... if len(result) != nr_trees:
|
883 |
+
... print("Wrong nr of trees:", len(result))
|
884 |
+
... elif trees is None:
|
885 |
+
... trees = result
|
886 |
+
... elif result != trees:
|
887 |
+
... print("Trees differ for parser:", P.__name__)
|
888 |
+
|
889 |
+
The demo grammar from before, with an ambiguous sentence.
|
890 |
+
|
891 |
+
>>> isawjohn = nltk.parse.featurechart.demo_grammar()
|
892 |
+
>>> unittest(isawjohn, "I saw John with a dog with my cookie", 5)
|
893 |
+
|
894 |
+
This grammar tests that variables in different grammar rules are renamed
|
895 |
+
before unification. (The problematic variable is in this case ?X).
|
896 |
+
|
897 |
+
>>> whatwasthat = nltk.grammar.FeatureGrammar.fromstring('''
|
898 |
+
... S[] -> NP[num=?N] VP[num=?N, slash=?X]
|
899 |
+
... NP[num=?X] -> "what"
|
900 |
+
... NP[num=?X] -> "that"
|
901 |
+
... VP[num=?P, slash=none] -> V[num=?P] NP[]
|
902 |
+
... V[num=sg] -> "was"
|
903 |
+
... ''')
|
904 |
+
>>> unittest(whatwasthat, "what was that", 1)
|
905 |
+
|
906 |
+
This grammar tests that the same rule can be used in different places
|
907 |
+
in another rule, and that the variables are properly renamed.
|
908 |
+
|
909 |
+
>>> thislovesthat = nltk.grammar.FeatureGrammar.fromstring('''
|
910 |
+
... S[] -> NP[case=nom] V[] NP[case=acc]
|
911 |
+
... NP[case=?X] -> Pron[case=?X]
|
912 |
+
... Pron[] -> "this"
|
913 |
+
... Pron[] -> "that"
|
914 |
+
... V[] -> "loves"
|
915 |
+
... ''')
|
916 |
+
>>> unittest(thislovesthat, "this loves that", 1)
|
917 |
+
|
918 |
+
|
919 |
+
Tests for loading feature grammar files
|
920 |
+
---------------------------------------
|
921 |
+
|
922 |
+
Alternative 1: first load the grammar, then create the parser.
|
923 |
+
|
924 |
+
>>> fcfg = nltk.data.load('grammars/book_grammars/feat0.fcfg')
|
925 |
+
>>> fcp1 = nltk.parse.FeatureChartParser(fcfg)
|
926 |
+
>>> print((type(fcp1)))
|
927 |
+
<class 'nltk.parse.featurechart.FeatureChartParser'>
|
928 |
+
|
929 |
+
Alternative 2: directly load the parser.
|
930 |
+
|
931 |
+
>>> fcp2 = nltk.parse.load_parser('grammars/book_grammars/feat0.fcfg')
|
932 |
+
>>> print((type(fcp2)))
|
933 |
+
<class 'nltk.parse.featurechart.FeatureChartParser'>
|
venv/lib/python3.10/site-packages/nltk/test/unit/__init__.py
ADDED
File without changes
venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_brill.cpython-310.pyc
ADDED
Binary file (1.31 kB)
venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pl196x.cpython-310.pyc
ADDED
Binary file (815 Bytes)
venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tgrep.cpython-310.pyc
ADDED
Binary file (20.6 kB)
venv/lib/python3.10/site-packages/nltk/test/unit/lm/__init__.py
ADDED
File without changes
venv/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (185 Bytes)
venv/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_counter.cpython-310.pyc
ADDED
Binary file (5.34 kB)
venv/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_models.cpython-310.pyc
ADDED
Binary file (9.25 kB)
venv/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_preprocessing.cpython-310.pyc
ADDED
Binary file (1.11 kB)
venv/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_vocabulary.cpython-310.pyc
ADDED
Binary file (6.9 kB)
venv/lib/python3.10/site-packages/nltk/test/unit/lm/test_counter.py
ADDED
@@ -0,0 +1,116 @@
+# Natural Language Toolkit: Language Model Unit Tests
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Ilia Kurenkov <[email protected]>
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+import unittest
+
+import pytest
+
+from nltk import FreqDist
+from nltk.lm import NgramCounter
+from nltk.util import everygrams
+
+
+class TestNgramCounter:
+    """Tests for NgramCounter that only involve lookup, no modification."""
+
+    @classmethod
+    def setup_class(self):
+        text = [list("abcd"), list("egdbe")]
+        self.trigram_counter = NgramCounter(
+            everygrams(sent, max_len=3) for sent in text
+        )
+        self.bigram_counter = NgramCounter(everygrams(sent, max_len=2) for sent in text)
+        self.case = unittest.TestCase()
+
+    def test_N(self):
+        assert self.bigram_counter.N() == 16
+        assert self.trigram_counter.N() == 21
+
+    def test_counter_len_changes_with_lookup(self):
+        assert len(self.bigram_counter) == 2
+        self.bigram_counter[50]
+        assert len(self.bigram_counter) == 3
+
+    def test_ngram_order_access_unigrams(self):
+        assert self.bigram_counter[1] == self.bigram_counter.unigrams
+
+    def test_ngram_conditional_freqdist(self):
+        case = unittest.TestCase()
+        expected_trigram_contexts = [
+            ("a", "b"),
+            ("b", "c"),
+            ("e", "g"),
+            ("g", "d"),
+            ("d", "b"),
+        ]
+        expected_bigram_contexts = [("a",), ("b",), ("d",), ("e",), ("c",), ("g",)]
+
+        bigrams = self.trigram_counter[2]
+        trigrams = self.trigram_counter[3]
+
+        self.case.assertCountEqual(expected_bigram_contexts, bigrams.conditions())
+        self.case.assertCountEqual(expected_trigram_contexts, trigrams.conditions())
+
+    def test_bigram_counts_seen_ngrams(self):
+        assert self.bigram_counter[["a"]]["b"] == 1
+        assert self.bigram_counter[["b"]]["c"] == 1
+
+    def test_bigram_counts_unseen_ngrams(self):
+        assert self.bigram_counter[["b"]]["z"] == 0
+
+    def test_unigram_counts_seen_words(self):
+        assert self.bigram_counter["b"] == 2
+
+    def test_unigram_counts_completely_unseen_words(self):
+        assert self.bigram_counter["z"] == 0
+
+
+class TestNgramCounterTraining:
+    @classmethod
+    def setup_class(self):
+        self.counter = NgramCounter()
+        self.case = unittest.TestCase()
+
+    @pytest.mark.parametrize("case", ["", [], None])
+    def test_empty_inputs(self, case):
+        test = NgramCounter(case)
+        assert 2 not in test
+        assert test[1] == FreqDist()
+
+    def test_train_on_unigrams(self):
+        words = list("abcd")
+        counter = NgramCounter([[(w,) for w in words]])
+
+        assert not counter[3]
+        assert not counter[2]
+        self.case.assertCountEqual(words, counter[1].keys())
+
+    def test_train_on_illegal_sentences(self):
+        str_sent = ["Check", "this", "out", "!"]
+        list_sent = [["Check", "this"], ["this", "out"], ["out", "!"]]
+
+        with pytest.raises(TypeError):
+            NgramCounter([str_sent])
+
+        with pytest.raises(TypeError):
+            NgramCounter([list_sent])
+
+    def test_train_on_bigrams(self):
+        bigram_sent = [("a", "b"), ("c", "d")]
+        counter = NgramCounter([bigram_sent])
+        assert not bool(counter[3])
+
+    def test_train_on_mix(self):
+        mixed_sent = [("a", "b"), ("c", "d"), ("e", "f", "g"), ("h",)]
+        counter = NgramCounter([mixed_sent])
+        unigrams = ["h"]
+        bigram_contexts = [("a",), ("c",)]
+        trigram_contexts = [("e", "f")]
+
+        self.case.assertCountEqual(unigrams, counter[1].keys())
+        self.case.assertCountEqual(bigram_contexts, counter[2].keys())
+        self.case.assertCountEqual(trigram_contexts, counter[3].keys())
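The assertions in test_counter.py above pin down concrete counts; a short usage sketch of nltk.lm.NgramCounter consistent with those tests (values taken from the assertions):

    from nltk.lm import NgramCounter
    from nltk.util import everygrams

    text = [list("abcd"), list("egdbe")]
    bigram_counter = NgramCounter(everygrams(sent, max_len=2) for sent in text)
    print(bigram_counter.N())          # 16, as asserted in test_N
    print(bigram_counter[["a"]]["b"])  # 1, as asserted in test_bigram_counts_seen_ngrams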
venv/lib/python3.10/site-packages/nltk/test/unit/lm/test_models.py
ADDED
@@ -0,0 +1,610 @@
+# Natural Language Toolkit: Language Model Unit Tests
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Ilia Kurenkov <[email protected]>
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+import math
+from operator import itemgetter
+
+import pytest
+
+from nltk.lm import (
+    MLE,
+    AbsoluteDiscountingInterpolated,
+    KneserNeyInterpolated,
+    Laplace,
+    Lidstone,
+    StupidBackoff,
+    Vocabulary,
+    WittenBellInterpolated,
+)
+from nltk.lm.preprocessing import padded_everygrams
+
+
+@pytest.fixture(scope="session")
+def vocabulary():
+    return Vocabulary(["a", "b", "c", "d", "z", "<s>", "</s>"], unk_cutoff=1)
+
+
+@pytest.fixture(scope="session")
+def training_data():
+    return [["a", "b", "c", "d"], ["e", "g", "a", "d", "b", "e"]]
+
+
+@pytest.fixture(scope="session")
+def bigram_training_data(training_data):
+    return [list(padded_everygrams(2, sent)) for sent in training_data]
+
+
+@pytest.fixture(scope="session")
+def trigram_training_data(training_data):
+    return [list(padded_everygrams(3, sent)) for sent in training_data]
+
+
+@pytest.fixture
+def mle_bigram_model(vocabulary, bigram_training_data):
+    model = MLE(2, vocabulary=vocabulary)
+    model.fit(bigram_training_data)
+    return model
+
+
+@pytest.mark.parametrize(
+    "word, context, expected_score",
+    [
+        ("d", ["c"], 1),
+        # Unseen ngrams should yield 0
+        ("d", ["e"], 0),
+        # Unigrams should also be 0
+        ("z", None, 0),
+        # N unigrams = 14
+        # count('a') = 2
+        ("a", None, 2.0 / 14),
+        # count('y') = 3
+        ("y", None, 3.0 / 14),
+    ],
+)
+def test_mle_bigram_scores(mle_bigram_model, word, context, expected_score):
+    assert pytest.approx(mle_bigram_model.score(word, context), 1e-4) == expected_score
+
+
+def test_mle_bigram_logscore_for_zero_score(mle_bigram_model):
+    assert math.isinf(mle_bigram_model.logscore("d", ["e"]))
+
+
+def test_mle_bigram_entropy_perplexity_seen(mle_bigram_model):
+    # ngrams seen during training
+    trained = [
+        ("<s>", "a"),
+        ("a", "b"),
+        ("b", "<UNK>"),
+        ("<UNK>", "a"),
+        ("a", "d"),
+        ("d", "</s>"),
+    ]
+    # Ngram = Log score
+    # <s>, a = -1
+    # a, b = -1
+    # b, UNK = -1
+    # UNK, a = -1.585
+    # a, d = -1
+    # d, </s> = -1
+    # TOTAL logscores = -6.585
+    # - AVG logscores = 1.0975
+    H = 1.0975
+    perplexity = 2.1398
+    assert pytest.approx(mle_bigram_model.entropy(trained), 1e-4) == H
+    assert pytest.approx(mle_bigram_model.perplexity(trained), 1e-4) == perplexity
+
+
+def test_mle_bigram_entropy_perplexity_unseen(mle_bigram_model):
+    # In MLE, even one unseen ngram should make entropy and perplexity infinite
+    untrained = [("<s>", "a"), ("a", "c"), ("c", "d"), ("d", "</s>")]
+
+    assert math.isinf(mle_bigram_model.entropy(untrained))
+    assert math.isinf(mle_bigram_model.perplexity(untrained))
+
+
+def test_mle_bigram_entropy_perplexity_unigrams(mle_bigram_model):
+    # word = score, log score
+    # <s> = 0.1429, -2.8074
+    # a = 0.1429, -2.8074
+    # c = 0.0714, -3.8073
+    # UNK = 0.2143, -2.2224
+    # d = 0.1429, -2.8074
+    # c = 0.0714, -3.8073
+    # </s> = 0.1429, -2.8074
+    # TOTAL logscores = -21.6243
+    # - AVG logscores = 3.0095
+    H = 3.0095
+    perplexity = 8.0529
+
+    text = [("<s>",), ("a",), ("c",), ("-",), ("d",), ("c",), ("</s>",)]
+
+    assert pytest.approx(mle_bigram_model.entropy(text), 1e-4) == H
+    assert pytest.approx(mle_bigram_model.perplexity(text), 1e-4) == perplexity
+
+
+@pytest.fixture
+def mle_trigram_model(trigram_training_data, vocabulary):
+    model = MLE(order=3, vocabulary=vocabulary)
+    model.fit(trigram_training_data)
+    return model
+
+
+@pytest.mark.parametrize(
+    "word, context, expected_score",
+    [
+        # count(d | b, c) = 1
+        # count(b, c) = 1
+        ("d", ("b", "c"), 1),
+        # count(d | c) = 1
+        # count(c) = 1
+        ("d", ["c"], 1),
+        # total number of tokens is 18, of which "a" occurred 2 times
+        ("a", None, 2.0 / 18),
+        # in vocabulary but unseen
+        ("z", None, 0),
+        # out of vocabulary should use "UNK" score
+        ("y", None, 3.0 / 18),
+    ],
+)
+def test_mle_trigram_scores(mle_trigram_model, word, context, expected_score):
+    assert pytest.approx(mle_trigram_model.score(word, context), 1e-4) == expected_score
+
+
+@pytest.fixture
+def lidstone_bigram_model(bigram_training_data, vocabulary):
+    model = Lidstone(0.1, order=2, vocabulary=vocabulary)
+    model.fit(bigram_training_data)
+    return model
+
+
+@pytest.mark.parametrize(
+    "word, context, expected_score",
+    [
+        # count(d | c) = 1
+        # *count(d | c) = 1.1
+        # Count(w | c for w in vocab) = 1
+        # *Count(w | c for w in vocab) = 1.8
+        ("d", ["c"], 1.1 / 1.8),
+        # Total unigrams: 14
+        # Vocab size: 8
+        # Denominator: 14 + 0.8 = 14.8
+        # count("a") = 2
+        # *count("a") = 2.1
+        ("a", None, 2.1 / 14.8),
+        # in vocabulary but unseen
+        # count("z") = 0
+        # *count("z") = 0.1
+        ("z", None, 0.1 / 14.8),
+        # out of vocabulary should use "UNK" score
+        # count("<UNK>") = 3
+        # *count("<UNK>") = 3.1
+        ("y", None, 3.1 / 14.8),
+    ],
+)
+def test_lidstone_bigram_score(lidstone_bigram_model, word, context, expected_score):
+    assert (
+        pytest.approx(lidstone_bigram_model.score(word, context), 1e-4)
+        == expected_score
+    )
+
+
+def test_lidstone_entropy_perplexity(lidstone_bigram_model):
+    text = [
+        ("<s>", "a"),
+        ("a", "c"),
+        ("c", "<UNK>"),
+        ("<UNK>", "d"),
+        ("d", "c"),
+        ("c", "</s>"),
|
202 |
+
]
|
203 |
+
# Unlike MLE this should be able to handle completely novel ngrams
|
204 |
+
# Ngram = score, log score
|
205 |
+
# <s>, a = 0.3929, -1.3479
|
206 |
+
# a, c = 0.0357, -4.8074
|
207 |
+
# c, UNK = 0.0(5), -4.1699
|
208 |
+
# UNK, d = 0.0263, -5.2479
|
209 |
+
# d, c = 0.0357, -4.8074
|
210 |
+
# c, </s> = 0.0(5), -4.1699
|
211 |
+
# TOTAL logscore: −24.5504
|
212 |
+
# - AVG logscore: 4.0917
|
213 |
+
H = 4.0917
|
214 |
+
perplexity = 17.0504
|
215 |
+
assert pytest.approx(lidstone_bigram_model.entropy(text), 1e-4) == H
|
216 |
+
assert pytest.approx(lidstone_bigram_model.perplexity(text), 1e-4) == perplexity
|
217 |
+
|
218 |
+
|
219 |
+
@pytest.fixture
|
220 |
+
def lidstone_trigram_model(trigram_training_data, vocabulary):
|
221 |
+
model = Lidstone(0.1, order=3, vocabulary=vocabulary)
|
222 |
+
model.fit(trigram_training_data)
|
223 |
+
return model
|
224 |
+
|
225 |
+
|
226 |
+
@pytest.mark.parametrize(
|
227 |
+
"word, context, expected_score",
|
228 |
+
[
|
229 |
+
# Logic behind this is the same as for bigram model
|
230 |
+
("d", ["c"], 1.1 / 1.8),
|
231 |
+
# if we choose a word that hasn't appeared after (b, c)
|
232 |
+
("e", ["c"], 0.1 / 1.8),
|
233 |
+
# Trigram score now
|
234 |
+
("d", ["b", "c"], 1.1 / 1.8),
|
235 |
+
("e", ["b", "c"], 0.1 / 1.8),
|
236 |
+
],
|
237 |
+
)
|
238 |
+
def test_lidstone_trigram_score(lidstone_trigram_model, word, context, expected_score):
|
239 |
+
assert (
|
240 |
+
pytest.approx(lidstone_trigram_model.score(word, context), 1e-4)
|
241 |
+
== expected_score
|
242 |
+
)
|
243 |
+
|
244 |
+
|
245 |
+
@pytest.fixture
|
246 |
+
def laplace_bigram_model(bigram_training_data, vocabulary):
|
247 |
+
model = Laplace(2, vocabulary=vocabulary)
|
248 |
+
model.fit(bigram_training_data)
|
249 |
+
return model
|
250 |
+
|
251 |
+
|
252 |
+
@pytest.mark.parametrize(
|
253 |
+
"word, context, expected_score",
|
254 |
+
[
|
255 |
+
# basic sanity-check:
|
256 |
+
# count(d | c) = 1
|
257 |
+
# *count(d | c) = 2
|
258 |
+
# Count(w | c for w in vocab) = 1
|
259 |
+
# *Count(w | c for w in vocab) = 9
|
260 |
+
("d", ["c"], 2.0 / 9),
|
261 |
+
# Total unigrams: 14
|
262 |
+
# Vocab size: 8
|
263 |
+
# Denominator: 14 + 8 = 22
|
264 |
+
# count("a") = 2
|
265 |
+
# *count("a") = 3
|
266 |
+
("a", None, 3.0 / 22),
|
267 |
+
# in vocabulary but unseen
|
268 |
+
# count("z") = 0
|
269 |
+
# *count("z") = 1
|
270 |
+
("z", None, 1.0 / 22),
|
271 |
+
# out of vocabulary should use "UNK" score
|
272 |
+
# count("<UNK>") = 3
|
273 |
+
# *count("<UNK>") = 4
|
274 |
+
("y", None, 4.0 / 22),
|
275 |
+
],
|
276 |
+
)
|
277 |
+
def test_laplace_bigram_score(laplace_bigram_model, word, context, expected_score):
|
278 |
+
assert (
|
279 |
+
pytest.approx(laplace_bigram_model.score(word, context), 1e-4) == expected_score
|
280 |
+
)
|
281 |
+
|
282 |
+
|
283 |
+
def test_laplace_bigram_entropy_perplexity(laplace_bigram_model):
|
284 |
+
text = [
|
285 |
+
("<s>", "a"),
|
286 |
+
("a", "c"),
|
287 |
+
("c", "<UNK>"),
|
288 |
+
("<UNK>", "d"),
|
289 |
+
("d", "c"),
|
290 |
+
("c", "</s>"),
|
291 |
+
]
|
292 |
+
# Unlike MLE this should be able to handle completely novel ngrams
|
293 |
+
# Ngram = score, log score
|
294 |
+
# <s>, a = 0.2, -2.3219
|
295 |
+
# a, c = 0.1, -3.3219
|
296 |
+
# c, UNK = 0.(1), -3.1699
|
297 |
+
# UNK, d = 0.(09), 3.4594
|
298 |
+
# d, c = 0.1 -3.3219
|
299 |
+
# c, </s> = 0.(1), -3.1699
|
300 |
+
# Total logscores: −18.7651
|
301 |
+
# - AVG logscores: 3.1275
|
302 |
+
H = 3.1275
|
303 |
+
perplexity = 8.7393
|
304 |
+
assert pytest.approx(laplace_bigram_model.entropy(text), 1e-4) == H
|
305 |
+
assert pytest.approx(laplace_bigram_model.perplexity(text), 1e-4) == perplexity
|
306 |
+
|
307 |
+
|
308 |
+
def test_laplace_gamma(laplace_bigram_model):
|
309 |
+
assert laplace_bigram_model.gamma == 1
|
310 |
+
|
311 |
+
|
312 |
+
@pytest.fixture
|
313 |
+
def wittenbell_trigram_model(trigram_training_data, vocabulary):
|
314 |
+
model = WittenBellInterpolated(3, vocabulary=vocabulary)
|
315 |
+
model.fit(trigram_training_data)
|
316 |
+
return model
|
317 |
+
|
318 |
+
|
319 |
+
@pytest.mark.parametrize(
|
320 |
+
"word, context, expected_score",
|
321 |
+
[
|
322 |
+
# For unigram scores by default revert to regular MLE
|
323 |
+
# Total unigrams: 18
|
324 |
+
# Vocab Size = 7
|
325 |
+
# count('c'): 1
|
326 |
+
("c", None, 1.0 / 18),
|
327 |
+
# in vocabulary but unseen
|
328 |
+
# count("z") = 0
|
329 |
+
("z", None, 0 / 18),
|
330 |
+
# out of vocabulary should use "UNK" score
|
331 |
+
# count("<UNK>") = 3
|
332 |
+
("y", None, 3.0 / 18),
|
333 |
+
# 2 words follow b and b occurred a total of 2 times
|
334 |
+
# gamma(['b']) = 2 / (2 + 2) = 0.5
|
335 |
+
# mle.score('c', ['b']) = 0.5
|
336 |
+
# mle('c') = 1 / 18 = 0.055
|
337 |
+
# (1 - gamma) * mle + gamma * mle('c') ~= 0.27 + 0.055
|
338 |
+
("c", ["b"], (1 - 0.5) * 0.5 + 0.5 * 1 / 18),
|
339 |
+
# building on that, let's try 'a b c' as the trigram
|
340 |
+
# 1 word follows 'a b' and 'a b' occurred 1 time
|
341 |
+
# gamma(['a', 'b']) = 1 / (1 + 1) = 0.5
|
342 |
+
# mle("c", ["a", "b"]) = 1
|
343 |
+
("c", ["a", "b"], (1 - 0.5) + 0.5 * ((1 - 0.5) * 0.5 + 0.5 * 1 / 18)),
|
344 |
+
# P(c|zb)
|
345 |
+
# The ngram 'zbc' was not seen, so we use P(c|b). See issue #2332.
|
346 |
+
("c", ["z", "b"], ((1 - 0.5) * 0.5 + 0.5 * 1 / 18)),
|
347 |
+
],
|
348 |
+
)
|
349 |
+
def test_wittenbell_trigram_score(
|
350 |
+
wittenbell_trigram_model, word, context, expected_score
|
351 |
+
):
|
352 |
+
assert (
|
353 |
+
pytest.approx(wittenbell_trigram_model.score(word, context), 1e-4)
|
354 |
+
== expected_score
|
355 |
+
)
|
356 |
+
|
357 |
+
|
358 |
+
###############################################################################
|
359 |
+
# Notation Explained #
|
360 |
+
###############################################################################
|
361 |
+
# For all subsequent calculations we use the following notation:
|
362 |
+
# 1. '*': Placeholder for any word/character. E.g. '*b' stands for
|
363 |
+
# all bigrams that end in 'b'. '*b*' stands for all trigrams that
|
364 |
+
# contain 'b' in the middle.
|
365 |
+
# 1. count(ngram): Count all instances (tokens) of an ngram.
|
366 |
+
# 1. unique(ngram): Count unique instances (types) of an ngram.
|
367 |
+
|
368 |
+
|
369 |
+
@pytest.fixture
|
370 |
+
def kneserney_trigram_model(trigram_training_data, vocabulary):
|
371 |
+
model = KneserNeyInterpolated(order=3, discount=0.75, vocabulary=vocabulary)
|
372 |
+
model.fit(trigram_training_data)
|
373 |
+
return model
|
374 |
+
|
375 |
+
|
376 |
+
@pytest.mark.parametrize(
|
377 |
+
"word, context, expected_score",
|
378 |
+
[
|
379 |
+
# P(c) = count('*c') / unique('**')
|
380 |
+
# = 1 / 14
|
381 |
+
("c", None, 1.0 / 14),
|
382 |
+
# P(z) = count('*z') / unique('**')
|
383 |
+
# = 0 / 14
|
384 |
+
# 'z' is in the vocabulary, but it was not seen during training.
|
385 |
+
("z", None, 0.0 / 14),
|
386 |
+
# P(y)
|
387 |
+
# Out of vocabulary should use "UNK" score.
|
388 |
+
# P(y) = P(UNK) = count('*UNK') / unique('**')
|
389 |
+
("y", None, 3 / 14),
|
390 |
+
# We start with P(c|b)
|
391 |
+
# P(c|b) = alpha('bc') + gamma('b') * P(c)
|
392 |
+
# alpha('bc') = max(unique('*bc') - discount, 0) / unique('*b*')
|
393 |
+
# = max(1 - 0.75, 0) / 2
|
394 |
+
# = 0.125
|
395 |
+
# gamma('b') = discount * unique('b*') / unique('*b*')
|
396 |
+
# = (0.75 * 2) / 2
|
397 |
+
# = 0.75
|
398 |
+
("c", ["b"], (0.125 + 0.75 * (1 / 14))),
|
399 |
+
# Building on that, let's try P(c|ab).
|
400 |
+
# P(c|ab) = alpha('abc') + gamma('ab') * P(c|b)
|
401 |
+
# alpha('abc') = max(count('abc') - discount, 0) / count('ab*')
|
402 |
+
# = max(1 - 0.75, 0) / 1
|
403 |
+
# = 0.25
|
404 |
+
# gamma('ab') = (discount * unique('ab*')) / count('ab*')
|
405 |
+
# = 0.75 * 1 / 1
|
406 |
+
("c", ["a", "b"], 0.25 + 0.75 * (0.125 + 0.75 * (1 / 14))),
|
407 |
+
# P(c|zb)
|
408 |
+
# The ngram 'zbc' was not seen, so we use P(c|b). See issue #2332.
|
409 |
+
("c", ["z", "b"], (0.125 + 0.75 * (1 / 14))),
|
410 |
+
],
|
411 |
+
)
|
412 |
+
def test_kneserney_trigram_score(
|
413 |
+
kneserney_trigram_model, word, context, expected_score
|
414 |
+
):
|
415 |
+
assert (
|
416 |
+
pytest.approx(kneserney_trigram_model.score(word, context), 1e-4)
|
417 |
+
== expected_score
|
418 |
+
)
|
419 |
+
|
420 |
+
|
421 |
+
@pytest.fixture
|
422 |
+
def absolute_discounting_trigram_model(trigram_training_data, vocabulary):
|
423 |
+
model = AbsoluteDiscountingInterpolated(order=3, vocabulary=vocabulary)
|
424 |
+
model.fit(trigram_training_data)
|
425 |
+
return model
|
426 |
+
|
427 |
+
|
428 |
+
@pytest.mark.parametrize(
|
429 |
+
"word, context, expected_score",
|
430 |
+
[
|
431 |
+
# For unigram scores revert to uniform
|
432 |
+
# P(c) = count('c') / count('**')
|
433 |
+
("c", None, 1.0 / 18),
|
434 |
+
# in vocabulary but unseen
|
435 |
+
# count('z') = 0
|
436 |
+
("z", None, 0.0 / 18),
|
437 |
+
# out of vocabulary should use "UNK" score
|
438 |
+
# count('<UNK>') = 3
|
439 |
+
("y", None, 3 / 18),
|
440 |
+
# P(c|b) = alpha('bc') + gamma('b') * P(c)
|
441 |
+
# alpha('bc') = max(count('bc') - discount, 0) / count('b*')
|
442 |
+
# = max(1 - 0.75, 0) / 2
|
443 |
+
# = 0.125
|
444 |
+
# gamma('b') = discount * unique('b*') / count('b*')
|
445 |
+
# = (0.75 * 2) / 2
|
446 |
+
# = 0.75
|
447 |
+
("c", ["b"], (0.125 + 0.75 * (2 / 2) * (1 / 18))),
|
448 |
+
# Building on that, let's try P(c|ab).
|
449 |
+
# P(c|ab) = alpha('abc') + gamma('ab') * P(c|b)
|
450 |
+
# alpha('abc') = max(count('abc') - discount, 0) / count('ab*')
|
451 |
+
# = max(1 - 0.75, 0) / 1
|
452 |
+
# = 0.25
|
453 |
+
# gamma('ab') = (discount * unique('ab*')) / count('ab*')
|
454 |
+
# = 0.75 * 1 / 1
|
455 |
+
("c", ["a", "b"], 0.25 + 0.75 * (0.125 + 0.75 * (2 / 2) * (1 / 18))),
|
456 |
+
# P(c|zb)
|
457 |
+
# The ngram 'zbc' was not seen, so we use P(c|b). See issue #2332.
|
458 |
+
("c", ["z", "b"], (0.125 + 0.75 * (2 / 2) * (1 / 18))),
|
459 |
+
],
|
460 |
+
)
|
461 |
+
def test_absolute_discounting_trigram_score(
|
462 |
+
absolute_discounting_trigram_model, word, context, expected_score
|
463 |
+
):
|
464 |
+
assert (
|
465 |
+
pytest.approx(absolute_discounting_trigram_model.score(word, context), 1e-4)
|
466 |
+
== expected_score
|
467 |
+
)
|
468 |
+
|
469 |
+
|
470 |
+
@pytest.fixture
|
471 |
+
def stupid_backoff_trigram_model(trigram_training_data, vocabulary):
|
472 |
+
model = StupidBackoff(order=3, vocabulary=vocabulary)
|
473 |
+
model.fit(trigram_training_data)
|
474 |
+
return model
|
475 |
+
|
476 |
+
|
477 |
+
@pytest.mark.parametrize(
|
478 |
+
"word, context, expected_score",
|
479 |
+
[
|
480 |
+
# For unigram scores revert to uniform
|
481 |
+
# total bigrams = 18
|
482 |
+
("c", None, 1.0 / 18),
|
483 |
+
# in vocabulary but unseen
|
484 |
+
# bigrams ending with z = 0
|
485 |
+
("z", None, 0.0 / 18),
|
486 |
+
# out of vocabulary should use "UNK" score
|
487 |
+
# count('<UNK>'): 3
|
488 |
+
("y", None, 3 / 18),
|
489 |
+
# c follows 1 time out of 2 after b
|
490 |
+
("c", ["b"], 1 / 2),
|
491 |
+
# c always follows ab
|
492 |
+
("c", ["a", "b"], 1 / 1),
|
493 |
+
# The ngram 'z b c' was not seen, so we backoff to
|
494 |
+
# the score of the ngram 'b c' * smoothing factor
|
495 |
+
("c", ["z", "b"], (0.4 * (1 / 2))),
|
496 |
+
],
|
497 |
+
)
|
498 |
+
def test_stupid_backoff_trigram_score(
|
499 |
+
stupid_backoff_trigram_model, word, context, expected_score
|
500 |
+
):
|
501 |
+
assert (
|
502 |
+
pytest.approx(stupid_backoff_trigram_model.score(word, context), 1e-4)
|
503 |
+
== expected_score
|
504 |
+
)
|
505 |
+
|
506 |
+
|
507 |
+
###############################################################################
|
508 |
+
# Probability Distributions Should Sum up to Unity #
|
509 |
+
###############################################################################
|
510 |
+
|
511 |
+
|
512 |
+
@pytest.fixture(scope="session")
|
513 |
+
def kneserney_bigram_model(bigram_training_data, vocabulary):
|
514 |
+
model = KneserNeyInterpolated(order=2, vocabulary=vocabulary)
|
515 |
+
model.fit(bigram_training_data)
|
516 |
+
return model
|
517 |
+
|
518 |
+
|
519 |
+
@pytest.mark.parametrize(
|
520 |
+
"model_fixture",
|
521 |
+
[
|
522 |
+
"mle_bigram_model",
|
523 |
+
"mle_trigram_model",
|
524 |
+
"lidstone_bigram_model",
|
525 |
+
"laplace_bigram_model",
|
526 |
+
"wittenbell_trigram_model",
|
527 |
+
"absolute_discounting_trigram_model",
|
528 |
+
"kneserney_bigram_model",
|
529 |
+
pytest.param(
|
530 |
+
"stupid_backoff_trigram_model",
|
531 |
+
marks=pytest.mark.xfail(
|
532 |
+
reason="Stupid Backoff is not a valid distribution"
|
533 |
+
),
|
534 |
+
),
|
535 |
+
],
|
536 |
+
)
|
537 |
+
@pytest.mark.parametrize(
|
538 |
+
"context",
|
539 |
+
[("a",), ("c",), ("<s>",), ("b",), ("<UNK>",), ("d",), ("e",), ("r",), ("w",)],
|
540 |
+
ids=itemgetter(0),
|
541 |
+
)
|
542 |
+
def test_sums_to_1(model_fixture, context, request):
|
543 |
+
model = request.getfixturevalue(model_fixture)
|
544 |
+
scores_for_context = sum(model.score(w, context) for w in model.vocab)
|
545 |
+
assert pytest.approx(scores_for_context, 1e-7) == 1.0
|
546 |
+
|
547 |
+
|
548 |
+
###############################################################################
|
549 |
+
# Generating Text #
|
550 |
+
###############################################################################
|
551 |
+
|
552 |
+
|
553 |
+
def test_generate_one_no_context(mle_trigram_model):
|
554 |
+
assert mle_trigram_model.generate(random_seed=3) == "<UNK>"
|
555 |
+
|
556 |
+
|
557 |
+
def test_generate_one_from_limiting_context(mle_trigram_model):
|
558 |
+
# We don't need random_seed for contexts with only one continuation
|
559 |
+
assert mle_trigram_model.generate(text_seed=["c"]) == "d"
|
560 |
+
assert mle_trigram_model.generate(text_seed=["b", "c"]) == "d"
|
561 |
+
assert mle_trigram_model.generate(text_seed=["a", "c"]) == "d"
|
562 |
+
|
563 |
+
|
564 |
+
def test_generate_one_from_varied_context(mle_trigram_model):
|
565 |
+
# When context doesn't limit our options enough, seed the random choice
|
566 |
+
assert mle_trigram_model.generate(text_seed=("a", "<s>"), random_seed=2) == "a"
|
567 |
+
|
568 |
+
|
569 |
+
def test_generate_cycle(mle_trigram_model):
|
570 |
+
# Add a cycle to the model: bd -> b, db -> d
|
571 |
+
more_training_text = [padded_everygrams(mle_trigram_model.order, list("bdbdbd"))]
|
572 |
+
|
573 |
+
mle_trigram_model.fit(more_training_text)
|
574 |
+
# Test that we can escape the cycle
|
575 |
+
assert mle_trigram_model.generate(7, text_seed=("b", "d"), random_seed=5) == [
|
576 |
+
"b",
|
577 |
+
"d",
|
578 |
+
"b",
|
579 |
+
"d",
|
580 |
+
"b",
|
581 |
+
"d",
|
582 |
+
"</s>",
|
583 |
+
]
|
584 |
+
|
585 |
+
|
586 |
+
def test_generate_with_text_seed(mle_trigram_model):
|
587 |
+
assert mle_trigram_model.generate(5, text_seed=("<s>", "e"), random_seed=3) == [
|
588 |
+
"<UNK>",
|
589 |
+
"a",
|
590 |
+
"d",
|
591 |
+
"b",
|
592 |
+
"<UNK>",
|
593 |
+
]
|
594 |
+
|
595 |
+
|
596 |
+
def test_generate_oov_text_seed(mle_trigram_model):
|
597 |
+
assert mle_trigram_model.generate(
|
598 |
+
text_seed=("aliens",), random_seed=3
|
599 |
+
) == mle_trigram_model.generate(text_seed=("<UNK>",), random_seed=3)
|
600 |
+
|
601 |
+
|
602 |
+
def test_generate_None_text_seed(mle_trigram_model):
|
603 |
+
# should crash with type error when we try to look it up in vocabulary
|
604 |
+
with pytest.raises(TypeError):
|
605 |
+
mle_trigram_model.generate(text_seed=(None,))
|
606 |
+
|
607 |
+
# This will work
|
608 |
+
assert mle_trigram_model.generate(
|
609 |
+
text_seed=None, random_seed=3
|
610 |
+
) == mle_trigram_model.generate(random_seed=3)
|
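The entropy and perplexity constants asserted in this file (for example H = 1.0975 and perplexity = 2.1398 in test_mle_bigram_entropy_perplexity_seen) are tied together by perplexity = 2 ** entropy, where entropy is the average negative log2 score over the scored ngrams. A minimal arithmetic sketch, using the per-ngram log scores listed in that test's comments and assuming the rounded -1.585 term stands for log2(1/3):

import math

# Log2 scores copied from the comments in test_mle_bigram_entropy_perplexity_seen;
# the -1.585 entry is taken to be log2(1/3) before rounding.
log_scores = [-1, -1, -1, math.log2(1 / 3), -1, -1]

entropy = -sum(log_scores) / len(log_scores)  # ~1.0975
perplexity = 2 ** entropy                     # ~2.1398

print(round(entropy, 4), round(perplexity, 4))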
venv/lib/python3.10/site-packages/nltk/test/unit/lm/test_preprocessing.py
ADDED
@@ -0,0 +1,30 @@
# Natural Language Toolkit: Language Model Unit Tests
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Ilia Kurenkov <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
import unittest

from nltk.lm.preprocessing import padded_everygram_pipeline


class TestPreprocessing(unittest.TestCase):
    def test_padded_everygram_pipeline(self):
        expected_train = [
            [
                ("<s>",),
                ("<s>", "a"),
                ("a",),
                ("a", "b"),
                ("b",),
                ("b", "c"),
                ("c",),
                ("c", "</s>"),
                ("</s>",),
            ]
        ]
        expected_vocab = ["<s>", "a", "b", "c", "</s>"]
        train_data, vocab_data = padded_everygram_pipeline(2, [["a", "b", "c"]])
        self.assertEqual([list(sent) for sent in train_data], expected_train)
        self.assertEqual(list(vocab_data), expected_vocab)
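The (train, vocab) pair asserted in this test is the same shape that the language-model fixtures in test_models.py consume. A small usage sketch under that assumption; the toy corpus is the one-sentence example from the test, and nothing below is part of the test file itself:

from nltk.lm import MLE
from nltk.lm.preprocessing import padded_everygram_pipeline

# Build padded everygrams and a flat vocabulary stream for a toy corpus,
# then fit a bigram MLE model on them.
train_data, vocab_data = padded_everygram_pipeline(2, [["a", "b", "c"]])
lm = MLE(2)
lm.fit(train_data, vocab_data)

print(lm.score("b", ["a"]))  # relative frequency of "b" after "a"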
venv/lib/python3.10/site-packages/nltk/test/unit/lm/test_vocabulary.py
ADDED
@@ -0,0 +1,156 @@
# Natural Language Toolkit: Language Model Unit Tests
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Ilia Kurenkov <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

import unittest
from collections import Counter
from timeit import timeit

from nltk.lm import Vocabulary


class NgramModelVocabularyTests(unittest.TestCase):
    """tests Vocabulary Class"""

    @classmethod
    def setUpClass(cls):
        cls.vocab = Vocabulary(
            ["z", "a", "b", "c", "f", "d", "e", "g", "a", "d", "b", "e", "w"],
            unk_cutoff=2,
        )

    def test_truthiness(self):
        self.assertTrue(self.vocab)

    def test_cutoff_value_set_correctly(self):
        self.assertEqual(self.vocab.cutoff, 2)

    def test_unable_to_change_cutoff(self):
        with self.assertRaises(AttributeError):
            self.vocab.cutoff = 3

    def test_cutoff_setter_checks_value(self):
        with self.assertRaises(ValueError) as exc_info:
            Vocabulary("abc", unk_cutoff=0)
        expected_error_msg = "Cutoff value cannot be less than 1. Got: 0"
        self.assertEqual(expected_error_msg, str(exc_info.exception))

    def test_counts_set_correctly(self):
        self.assertEqual(self.vocab.counts["a"], 2)
        self.assertEqual(self.vocab.counts["b"], 2)
        self.assertEqual(self.vocab.counts["c"], 1)

    def test_membership_check_respects_cutoff(self):
        # a was seen 2 times, so it should be considered part of the vocabulary
        self.assertTrue("a" in self.vocab)
        # "c" was seen once, it shouldn't be considered part of the vocab
        self.assertFalse("c" in self.vocab)
        # "z" was never seen at all, also shouldn't be considered in the vocab
        self.assertFalse("z" in self.vocab)

    def test_vocab_len_respects_cutoff(self):
        # Vocab size is the number of unique tokens that occur at least as often
        # as the cutoff value, plus 1 to account for unknown words.
        self.assertEqual(5, len(self.vocab))

    def test_vocab_iter_respects_cutoff(self):
        vocab_counts = ["a", "b", "c", "d", "e", "f", "g", "w", "z"]
        vocab_items = ["a", "b", "d", "e", "<UNK>"]

        self.assertCountEqual(vocab_counts, list(self.vocab.counts.keys()))
        self.assertCountEqual(vocab_items, list(self.vocab))

    def test_update_empty_vocab(self):
        empty = Vocabulary(unk_cutoff=2)
        self.assertEqual(len(empty), 0)
        self.assertFalse(empty)
        self.assertIn(empty.unk_label, empty)

        empty.update(list("abcde"))
        self.assertIn(empty.unk_label, empty)

    def test_lookup(self):
        self.assertEqual(self.vocab.lookup("a"), "a")
        self.assertEqual(self.vocab.lookup("c"), "<UNK>")

    def test_lookup_iterables(self):
        self.assertEqual(self.vocab.lookup(["a", "b"]), ("a", "b"))
        self.assertEqual(self.vocab.lookup(("a", "b")), ("a", "b"))
        self.assertEqual(self.vocab.lookup(("a", "c")), ("a", "<UNK>"))
        self.assertEqual(
            self.vocab.lookup(map(str, range(3))), ("<UNK>", "<UNK>", "<UNK>")
        )

    def test_lookup_empty_iterables(self):
        self.assertEqual(self.vocab.lookup(()), ())
        self.assertEqual(self.vocab.lookup([]), ())
        self.assertEqual(self.vocab.lookup(iter([])), ())
        self.assertEqual(self.vocab.lookup(n for n in range(0, 0)), ())

    def test_lookup_recursive(self):
        self.assertEqual(
            self.vocab.lookup([["a", "b"], ["a", "c"]]), (("a", "b"), ("a", "<UNK>"))
        )
        self.assertEqual(self.vocab.lookup([["a", "b"], "c"]), (("a", "b"), "<UNK>"))
        self.assertEqual(self.vocab.lookup([[[[["a", "b"]]]]]), ((((("a", "b"),),),),))

    def test_lookup_None(self):
        with self.assertRaises(TypeError):
            self.vocab.lookup(None)
        with self.assertRaises(TypeError):
            list(self.vocab.lookup([None, None]))

    def test_lookup_int(self):
        with self.assertRaises(TypeError):
            self.vocab.lookup(1)
        with self.assertRaises(TypeError):
            list(self.vocab.lookup([1, 2]))

    def test_lookup_empty_str(self):
        self.assertEqual(self.vocab.lookup(""), "<UNK>")

    def test_eqality(self):
        v1 = Vocabulary(["a", "b", "c"], unk_cutoff=1)
        v2 = Vocabulary(["a", "b", "c"], unk_cutoff=1)
        v3 = Vocabulary(["a", "b", "c"], unk_cutoff=1, unk_label="blah")
        v4 = Vocabulary(["a", "b"], unk_cutoff=1)

        self.assertEqual(v1, v2)
        self.assertNotEqual(v1, v3)
        self.assertNotEqual(v1, v4)

    def test_str(self):
        self.assertEqual(
            str(self.vocab), "<Vocabulary with cutoff=2 unk_label='<UNK>' and 5 items>"
        )

    def test_creation_with_counter(self):
        self.assertEqual(
            self.vocab,
            Vocabulary(
                Counter(
                    ["z", "a", "b", "c", "f", "d", "e", "g", "a", "d", "b", "e", "w"]
                ),
                unk_cutoff=2,
            ),
        )

    @unittest.skip(
        reason="Test is known to be flaky as it compares (runtime) performance."
    )
    def test_len_is_constant(self):
        # Given an obviously small and an obviously large vocabulary.
        small_vocab = Vocabulary("abcde")
        from nltk.corpus.europarl_raw import english

        large_vocab = Vocabulary(english.words())

        # If we time calling `len` on them.
        small_vocab_len_time = timeit("len(small_vocab)", globals=locals())
        large_vocab_len_time = timeit("len(large_vocab)", globals=locals())

        # The timing should be the same order of magnitude.
        self.assertAlmostEqual(small_vocab_len_time, large_vocab_len_time, places=1)
venv/lib/python3.10/site-packages/nltk/test/unit/test_aline.py
ADDED
@@ -0,0 +1,48 @@
"""
Test Aline algorithm for aligning phonetic sequences
"""
from nltk.metrics import aline


def test_aline():
    result = aline.align("θin", "tenwis")
    expected = [[("θ", "t"), ("i", "e"), ("n", "n")]]

    assert result == expected

    result = aline.align("jo", "ʒə")
    expected = [[("j", "ʒ"), ("o", "ə")]]

    assert result == expected

    result = aline.align("pematesiweni", "pematesewen")
    expected = [
        [
            ("p", "p"),
            ("e", "e"),
            ("m", "m"),
            ("a", "a"),
            ("t", "t"),
            ("e", "e"),
            ("s", "s"),
            ("i", "e"),
            ("w", "w"),
            ("e", "e"),
            ("n", "n"),
        ]
    ]

    assert result == expected

    result = aline.align("tuwθ", "dentis")
    expected = [[("t", "t"), ("u", "i"), ("w", "-"), ("θ", "s")]]

    assert result == expected


def test_aline_delta():
    """
    Test aline for computing the difference between two segments
    """
    assert aline.delta("p", "q") == 20.0
    assert aline.delta("a", "A") == 0.0
venv/lib/python3.10/site-packages/nltk/test/unit/test_bllip.py
ADDED
@@ -0,0 +1,42 @@
import pytest

from nltk.data import find
from nltk.parse.bllip import BllipParser
from nltk.tree import Tree


@pytest.fixture(scope="module")
def parser():
    model_dir = find("models/bllip_wsj_no_aux").path
    return BllipParser.from_unified_model_dir(model_dir)


def setup_module():
    pytest.importorskip("bllipparser")


class TestBllipParser:
    def test_parser_loads_a_valid_tree(self, parser):
        parsed = parser.parse("I saw the man with the telescope")
        tree = next(parsed)

        assert isinstance(tree, Tree)
        assert (
            tree.pformat()
            == """
(S1
  (S
    (NP (PRP I))
    (VP
      (VBD saw)
      (NP (DT the) (NN man))
      (PP (IN with) (NP (DT the) (NN telescope))))))
""".strip()
        )

    def test_tagged_parse_finds_matching_element(self, parser):
        parsed = parser.parse("I saw the man with the telescope")
        tagged_tree = next(parser.tagged_parse([("telescope", "NN")]))

        assert isinstance(tagged_tree, Tree)
        assert tagged_tree.pformat() == "(S1 (NP (NN telescope)))"
venv/lib/python3.10/site-packages/nltk/test/unit/test_brill.py
ADDED
@@ -0,0 +1,34 @@
"""
Tests for Brill tagger.
"""

import unittest

from nltk.corpus import treebank
from nltk.tag import UnigramTagger, brill, brill_trainer
from nltk.tbl import demo


class TestBrill(unittest.TestCase):
    def test_pos_template(self):
        train_sents = treebank.tagged_sents()[:1000]
        tagger = UnigramTagger(train_sents)
        trainer = brill_trainer.BrillTaggerTrainer(
            tagger, [brill.Template(brill.Pos([-1]))]
        )
        brill_tagger = trainer.train(train_sents)
        # Example from https://github.com/nltk/nltk/issues/769
        result = brill_tagger.tag("This is a foo bar sentence".split())
        expected = [
            ("This", "DT"),
            ("is", "VBZ"),
            ("a", "DT"),
            ("foo", None),
            ("bar", "NN"),
            ("sentence", None),
        ]
        self.assertEqual(result, expected)

    @unittest.skip("Should be tested in __main__ of nltk.tbl.demo")
    def test_brill_demo(self):
        demo()
venv/lib/python3.10/site-packages/nltk/test/unit/test_cfd_mutation.py
ADDED
@@ -0,0 +1,39 @@
import unittest

import pytest

from nltk import ConditionalFreqDist, tokenize


class TestEmptyCondFreq(unittest.TestCase):
    def test_tabulate(self):
        empty = ConditionalFreqDist()
        self.assertEqual(empty.conditions(), [])
        with pytest.raises(ValueError):
            empty.tabulate(conditions="BUG")  # nonexistent keys shouldn't be added
        self.assertEqual(empty.conditions(), [])

    def test_plot(self):
        empty = ConditionalFreqDist()
        self.assertEqual(empty.conditions(), [])
        empty.plot(conditions=["BUG"])  # nonexistent keys shouldn't be added
        self.assertEqual(empty.conditions(), [])

    def test_increment(self):
        # make sure that we can still mutate cfd normally
        text = "cow cat mouse cat tiger"
        cfd = ConditionalFreqDist()

        # create cfd with word length as condition
        for word in tokenize.word_tokenize(text):
            condition = len(word)
            cfd[condition][word] += 1

        self.assertEqual(cfd.conditions(), [3, 5])

        # incrementing previously unseen key is still possible
        cfd[2]["hi"] += 1
        self.assertCountEqual(cfd.conditions(), [3, 5, 2])  # new condition added
        self.assertEqual(
            cfd[2]["hi"], 1
        )  # key's frequency incremented from 0 (unseen) to 1
venv/lib/python3.10/site-packages/nltk/test/unit/test_cfg2chomsky.py
ADDED
@@ -0,0 +1,49 @@
import unittest

import nltk
from nltk.grammar import CFG


class ChomskyNormalFormForCFGTest(unittest.TestCase):
    def test_simple(self):
        grammar = CFG.fromstring(
            """
          S -> NP VP
          PP -> P NP
          NP -> Det N | NP PP P
          VP -> V NP | VP PP
          VP -> Det
          Det -> 'a' | 'the'
          N -> 'dog' | 'cat'
          V -> 'chased' | 'sat'
          P -> 'on' | 'in'
        """
        )
        self.assertFalse(grammar.is_flexible_chomsky_normal_form())
        self.assertFalse(grammar.is_chomsky_normal_form())
        grammar = grammar.chomsky_normal_form(flexible=True)
        self.assertTrue(grammar.is_flexible_chomsky_normal_form())
        self.assertFalse(grammar.is_chomsky_normal_form())

        grammar2 = CFG.fromstring(
            """
          S -> NP VP
          NP -> VP N P
          VP -> P
          N -> 'dog' | 'cat'
          P -> 'on' | 'in'
        """
        )
        self.assertFalse(grammar2.is_flexible_chomsky_normal_form())
        self.assertFalse(grammar2.is_chomsky_normal_form())
        grammar2 = grammar2.chomsky_normal_form()
        self.assertTrue(grammar2.is_flexible_chomsky_normal_form())
        self.assertTrue(grammar2.is_chomsky_normal_form())

    def test_complex(self):
        grammar = nltk.data.load("grammars/large_grammars/atis.cfg")
        self.assertFalse(grammar.is_flexible_chomsky_normal_form())
        self.assertFalse(grammar.is_chomsky_normal_form())
        grammar = grammar.chomsky_normal_form(flexible=True)
        self.assertTrue(grammar.is_flexible_chomsky_normal_form())
        self.assertFalse(grammar.is_chomsky_normal_form())
venv/lib/python3.10/site-packages/nltk/test/unit/test_chunk.py
ADDED
@@ -0,0 +1,85 @@
import unittest

from nltk import RegexpParser


class TestChunkRule(unittest.TestCase):
    def test_tag_pattern2re_pattern_quantifier(self):
        """Test for bug https://github.com/nltk/nltk/issues/1597

        Ensures that curly bracket quantifiers can be used inside a chunk rule.
        This type of quantifier has been used for the supplementary example
        in https://www.nltk.org/book/ch07.html#exploring-text-corpora.
        """
        sent = [
            ("The", "AT"),
            ("September-October", "NP"),
            ("term", "NN"),
            ("jury", "NN"),
            ("had", "HVD"),
            ("been", "BEN"),
            ("charged", "VBN"),
            ("by", "IN"),
            ("Fulton", "NP-TL"),
            ("Superior", "JJ-TL"),
            ("Court", "NN-TL"),
            ("Judge", "NN-TL"),
            ("Durwood", "NP"),
            ("Pye", "NP"),
            ("to", "TO"),
            ("investigate", "VB"),
            ("reports", "NNS"),
            ("of", "IN"),
            ("possible", "JJ"),
            ("``", "``"),
            ("irregularities", "NNS"),
            ("''", "''"),
            ("in", "IN"),
            ("the", "AT"),
            ("hard-fought", "JJ"),
            ("primary", "NN"),
            ("which", "WDT"),
            ("was", "BEDZ"),
            ("won", "VBN"),
            ("by", "IN"),
            ("Mayor-nominate", "NN-TL"),
            ("Ivan", "NP"),
            ("Allen", "NP"),
            ("Jr.", "NP"),
            (".", "."),
        ]  # source: brown corpus
        cp = RegexpParser("CHUNK: {<N.*>{4,}}")
        tree = cp.parse(sent)
        assert (
            tree.pformat()
            == """(S
  The/AT
  September-October/NP
  term/NN
  jury/NN
  had/HVD
  been/BEN
  charged/VBN
  by/IN
  Fulton/NP-TL
  Superior/JJ-TL
  (CHUNK Court/NN-TL Judge/NN-TL Durwood/NP Pye/NP)
  to/TO
  investigate/VB
  reports/NNS
  of/IN
  possible/JJ
  ``/``
  irregularities/NNS
  ''/''
  in/IN
  the/AT
  hard-fought/JJ
  primary/NN
  which/WDT
  was/BEDZ
  won/VBN
  by/IN
  (CHUNK Mayor-nominate/NN-TL Ivan/NP Allen/NP Jr./NP)
  ./.)"""
        )
venv/lib/python3.10/site-packages/nltk/test/unit/test_classify.py
ADDED
@@ -0,0 +1,49 @@
"""
Unit tests for nltk.classify. See also: nltk/test/classify.doctest
"""
import pytest

from nltk import classify

TRAIN = [
    (dict(a=1, b=1, c=1), "y"),
    (dict(a=1, b=1, c=1), "x"),
    (dict(a=1, b=1, c=0), "y"),
    (dict(a=0, b=1, c=1), "x"),
    (dict(a=0, b=1, c=1), "y"),
    (dict(a=0, b=0, c=1), "y"),
    (dict(a=0, b=1, c=0), "x"),
    (dict(a=0, b=0, c=0), "x"),
    (dict(a=0, b=1, c=1), "y"),
]

TEST = [
    (dict(a=1, b=0, c=1)),  # unseen
    (dict(a=1, b=0, c=0)),  # unseen
    (dict(a=0, b=1, c=1)),  # seen 3 times, labels=y,y,x
    (dict(a=0, b=1, c=0)),  # seen 1 time, label=x
]

RESULTS = [(0.16, 0.84), (0.46, 0.54), (0.41, 0.59), (0.76, 0.24)]


def assert_classifier_correct(algorithm):
    try:
        classifier = classify.MaxentClassifier.train(
            TRAIN, algorithm, trace=0, max_iter=1000
        )
    except (LookupError, AttributeError) as e:
        pytest.skip(str(e))

    for (px, py), featureset in zip(RESULTS, TEST):
        pdist = classifier.prob_classify(featureset)
        assert abs(pdist.prob("x") - px) < 1e-2, (pdist.prob("x"), px)
        assert abs(pdist.prob("y") - py) < 1e-2, (pdist.prob("y"), py)


def test_megam():
    assert_classifier_correct("MEGAM")


def test_tadm():
    assert_classifier_correct("TADM")
venv/lib/python3.10/site-packages/nltk/test/unit/test_collocations.py
ADDED
@@ -0,0 +1,120 @@
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures

## Test bigram counters with discontinuous bigrams and repeated words

_EPSILON = 1e-8
SENT = "this this is is a a test test".split()


def close_enough(x, y):
    """Verify that two sequences of n-gram association values are within
    _EPSILON of each other.
    """

    return all(abs(x1[1] - y1[1]) <= _EPSILON for x1, y1 in zip(x, y))


def test_bigram2():
    b = BigramCollocationFinder.from_words(SENT)

    assert sorted(b.ngram_fd.items()) == [
        (("a", "a"), 1),
        (("a", "test"), 1),
        (("is", "a"), 1),
        (("is", "is"), 1),
        (("test", "test"), 1),
        (("this", "is"), 1),
        (("this", "this"), 1),
    ]
    assert sorted(b.word_fd.items()) == [("a", 2), ("is", 2), ("test", 2), ("this", 2)]

    assert len(SENT) == sum(b.word_fd.values()) == sum(b.ngram_fd.values()) + 1
    assert close_enough(
        sorted(b.score_ngrams(BigramAssocMeasures.pmi)),
        [
            (("a", "a"), 1.0),
            (("a", "test"), 1.0),
            (("is", "a"), 1.0),
            (("is", "is"), 1.0),
            (("test", "test"), 1.0),
            (("this", "is"), 1.0),
            (("this", "this"), 1.0),
        ],
    )


def test_bigram3():
    b = BigramCollocationFinder.from_words(SENT, window_size=3)
    assert sorted(b.ngram_fd.items()) == sorted(
        [
            (("a", "test"), 3),
            (("is", "a"), 3),
            (("this", "is"), 3),
            (("a", "a"), 1),
            (("is", "is"), 1),
            (("test", "test"), 1),
            (("this", "this"), 1),
        ]
    )

    assert sorted(b.word_fd.items()) == sorted(
        [("a", 2), ("is", 2), ("test", 2), ("this", 2)]
    )

    assert (
        len(SENT) == sum(b.word_fd.values()) == (sum(b.ngram_fd.values()) + 2 + 1) / 2.0
    )
    assert close_enough(
        sorted(b.score_ngrams(BigramAssocMeasures.pmi)),
        sorted(
            [
                (("a", "test"), 1.584962500721156),
                (("is", "a"), 1.584962500721156),
                (("this", "is"), 1.584962500721156),
                (("a", "a"), 0.0),
                (("is", "is"), 0.0),
                (("test", "test"), 0.0),
                (("this", "this"), 0.0),
            ]
        ),
    )


def test_bigram5():
    b = BigramCollocationFinder.from_words(SENT, window_size=5)
    assert sorted(b.ngram_fd.items()) == sorted(
        [
            (("a", "test"), 4),
            (("is", "a"), 4),
            (("this", "is"), 4),
            (("is", "test"), 3),
            (("this", "a"), 3),
            (("a", "a"), 1),
            (("is", "is"), 1),
            (("test", "test"), 1),
            (("this", "this"), 1),
        ]
    )
    assert sorted(b.word_fd.items()) == sorted(
        [("a", 2), ("is", 2), ("test", 2), ("this", 2)]
    )
    n_word_fd = sum(b.word_fd.values())
    n_ngram_fd = (sum(b.ngram_fd.values()) + 4 + 3 + 2 + 1) / 4.0
    assert len(SENT) == n_word_fd == n_ngram_fd
    assert close_enough(
        sorted(b.score_ngrams(BigramAssocMeasures.pmi)),
        sorted(
            [
                (("a", "test"), 1.0),
                (("is", "a"), 1.0),
                (("this", "is"), 1.0),
                (("is", "test"), 0.5849625007211562),
                (("this", "a"), 0.5849625007211562),
                (("a", "a"), -1.0),
                (("is", "is"), -1.0),
                (("test", "test"), -1.0),
                (("this", "this"), -1.0),
            ]
        ),
    )
venv/lib/python3.10/site-packages/nltk/test/unit/test_concordance.py
ADDED
@@ -0,0 +1,98 @@
import contextlib
import sys
import unittest
from io import StringIO

from nltk.corpus import gutenberg
from nltk.text import Text


@contextlib.contextmanager
def stdout_redirect(where):
    sys.stdout = where
    try:
        yield where
    finally:
        sys.stdout = sys.__stdout__


class TestConcordance(unittest.TestCase):
    """Text constructed using: https://www.nltk.org/book/ch01.html"""

    @classmethod
    def setUpClass(cls):
        cls.corpus = gutenberg.words("melville-moby_dick.txt")

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        self.text = Text(TestConcordance.corpus)
        self.query = "monstrous"
        self.maxDiff = None
        self.list_out = [
            "ong the former , one was of a most monstrous size . ... This came towards us , ",
            'ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r',
            "ll over with a heathenish array of monstrous clubs and spears . Some were thick",
            "d as you gazed , and wondered what monstrous cannibal and savage could ever hav",
            "that has survived the flood ; most monstrous and most mountainous ! That Himmal",
            "they might scout at Moby Dick as a monstrous fable , or still worse and more de",
            "th of Radney .'\" CHAPTER 55 Of the Monstrous Pictures of Whales . I shall ere l",
            "ing Scenes . In connexion with the monstrous pictures of whales , I am strongly",
            "ere to enter upon those still more monstrous stories of them which are to be fo",
            "ght have been rummaged out of this monstrous cabinet there is no telling . But ",
            "of Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead u",
        ]

    def tearDown(self):
        pass

    def test_concordance_list(self):
        concordance_out = self.text.concordance_list(self.query)
        self.assertEqual(self.list_out, [c.line for c in concordance_out])

    def test_concordance_width(self):
        list_out = [
            "monstrous",
            "monstrous",
            "monstrous",
            "monstrous",
            "monstrous",
            "monstrous",
            "Monstrous",
            "monstrous",
            "monstrous",
            "monstrous",
            "monstrous",
        ]

        concordance_out = self.text.concordance_list(self.query, width=0)
        self.assertEqual(list_out, [c.query for c in concordance_out])

    def test_concordance_lines(self):
        concordance_out = self.text.concordance_list(self.query, lines=3)
        self.assertEqual(self.list_out[:3], [c.line for c in concordance_out])

    def test_concordance_print(self):
        print_out = """Displaying 11 of 11 matches:
ong the former , one was of a most monstrous size . ... This came towards us ,
ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r
ll over with a heathenish array of monstrous clubs and spears . Some were thick
d as you gazed , and wondered what monstrous cannibal and savage could ever hav
that has survived the flood ; most monstrous and most mountainous ! That Himmal
they might scout at Moby Dick as a monstrous fable , or still worse and more de
th of Radney .'" CHAPTER 55 Of the Monstrous Pictures of Whales . I shall ere l
ing Scenes . In connexion with the monstrous pictures of whales , I am strongly
ere to enter upon those still more monstrous stories of them which are to be fo
ght have been rummaged out of this monstrous cabinet there is no telling . But
of Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead u
"""

        with stdout_redirect(StringIO()) as stdout:
            self.text.concordance(self.query)

        def strip_space(raw_str):
            return raw_str.replace(" ", "")

        self.assertEqual(strip_space(print_out), strip_space(stdout.getvalue()))
venv/lib/python3.10/site-packages/nltk/test/unit/test_corenlp.py
ADDED
@@ -0,0 +1,1436 @@
"""
Mock test for Stanford CoreNLP wrappers.
"""

from unittest import TestCase
from unittest.mock import MagicMock

import pytest

from nltk.parse import corenlp
from nltk.tree import Tree


def setup_module(module):
    global server

    try:
        server = corenlp.CoreNLPServer(port=9000)
    except LookupError:
        pytest.skip("Could not instantiate CoreNLPServer.")

    try:
        server.start()
    except corenlp.CoreNLPServerError as e:
        pytest.skip(
            "Skipping CoreNLP tests because the server could not be started. "
            "Make sure that the 9000 port is free. "
            "{}".format(e.strerror)
        )


def teardown_module(module):
    server.stop()


class TestTokenizerAPI(TestCase):
    def test_tokenize(self):
        corenlp_tokenizer = corenlp.CoreNLPParser()

        api_return_value = {
            "sentences": [
                {
                    "index": 0,
                    "tokens": [
                        {
                            "after": " ",
                            "before": "",
                            "characterOffsetBegin": 0,
                            "characterOffsetEnd": 4,
                            "index": 1,
                            "originalText": "Good",
                            "word": "Good",
                        },
                        {
                            "after": " ",
                            "before": " ",
                            "characterOffsetBegin": 5,
                            "characterOffsetEnd": 12,
                            "index": 2,
                            "originalText": "muffins",
                            "word": "muffins",
                        },
                        {
                            "after": " ",
                            "before": " ",
                            "characterOffsetBegin": 13,
                            "characterOffsetEnd": 17,
                            "index": 3,
                            "originalText": "cost",
                            "word": "cost",
                        },
                        {
                            "after": "",
                            "before": " ",
                            "characterOffsetBegin": 18,
                            "characterOffsetEnd": 19,
                            "index": 4,
                            "originalText": "$",
                            "word": "$",
                        },
                        {
                            "after": "\n",
                            "before": "",
                            "characterOffsetBegin": 19,
                            "characterOffsetEnd": 23,
                            "index": 5,
                            "originalText": "3.88",
                            "word": "3.88",
                        },
                        {
                            "after": " ",
                            "before": "\n",
                            "characterOffsetBegin": 24,
                            "characterOffsetEnd": 26,
                            "index": 6,
                            "originalText": "in",
                            "word": "in",
                        },
                        {
                            "after": " ",
                            "before": " ",
                            "characterOffsetBegin": 27,
                            "characterOffsetEnd": 30,
                            "index": 7,
                            "originalText": "New",
                            "word": "New",
                        },
                        {
                            "after": "",
                            "before": " ",
                            "characterOffsetBegin": 31,
                            "characterOffsetEnd": 35,
                            "index": 8,
                            "originalText": "York",
                            "word": "York",
                        },
                        {
                            "after": " ",
                            "before": "",
                            "characterOffsetBegin": 35,
                            "characterOffsetEnd": 36,
                            "index": 9,
                            "originalText": ".",
                            "word": ".",
                        },
                    ],
                },
                {
                    "index": 1,
                    "tokens": [
                        {
                            "after": " ",
                            "before": " ",
                            "characterOffsetBegin": 38,
                            "characterOffsetEnd": 44,
                            "index": 1,
                            "originalText": "Please",
                            "word": "Please",
                        },
                        {
                            "after": " ",
                            "before": " ",
                            "characterOffsetBegin": 45,
                            "characterOffsetEnd": 48,
                            "index": 2,
                            "originalText": "buy",
                            "word": "buy",
                        },
                        {
                            "after": "\n",
                            "before": " ",
                            "characterOffsetBegin": 49,
                            "characterOffsetEnd": 51,
                            "index": 3,
                            "originalText": "me",
                            "word": "me",
                        },
                        {
                            "after": " ",
                            "before": "\n",
                            "characterOffsetBegin": 52,
                            "characterOffsetEnd": 55,
                            "index": 4,
                            "originalText": "two",
                            "word": "two",
                        },
                        {
                            "after": " ",
                            "before": " ",
                            "characterOffsetBegin": 56,
                            "characterOffsetEnd": 58,
                            "index": 5,
                            "originalText": "of",
                            "word": "of",
                        },
                        {
                            "after": "",
                            "before": " ",
                            "characterOffsetBegin": 59,
                            "characterOffsetEnd": 63,
                            "index": 6,
                            "originalText": "them",
                            "word": "them",
                        },
                        {
                            "after": "\n",
                            "before": "",
                            "characterOffsetBegin": 63,
                            "characterOffsetEnd": 64,
                            "index": 7,
                            "originalText": ".",
                            "word": ".",
                        },
                    ],
                },
                {
                    "index": 2,
                    "tokens": [
                        {
                            "after": "",
                            "before": "\n",
                            "characterOffsetBegin": 65,
                            "characterOffsetEnd": 71,
                            "index": 1,
                            "originalText": "Thanks",
                            "word": "Thanks",
                        },
                        {
                            "after": "",
                            "before": "",
                            "characterOffsetBegin": 71,
                            "characterOffsetEnd": 72,
                            "index": 2,
                            "originalText": ".",
|
215 |
+
"word": ".",
|
216 |
+
},
|
217 |
+
],
|
218 |
+
},
|
219 |
+
]
|
220 |
+
}
|
221 |
+
corenlp_tokenizer.api_call = MagicMock(return_value=api_return_value)
|
222 |
+
|
223 |
+
input_string = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks."
|
224 |
+
|
225 |
+
expected_output = [
|
226 |
+
"Good",
|
227 |
+
"muffins",
|
228 |
+
"cost",
|
229 |
+
"$",
|
230 |
+
"3.88",
|
231 |
+
"in",
|
232 |
+
"New",
|
233 |
+
"York",
|
234 |
+
".",
|
235 |
+
"Please",
|
236 |
+
"buy",
|
237 |
+
"me",
|
238 |
+
"two",
|
239 |
+
"of",
|
240 |
+
"them",
|
241 |
+
".",
|
242 |
+
"Thanks",
|
243 |
+
".",
|
244 |
+
]
|
245 |
+
|
246 |
+
tokenized_output = list(corenlp_tokenizer.tokenize(input_string))
|
247 |
+
|
248 |
+
corenlp_tokenizer.api_call.assert_called_once_with(
|
249 |
+
"Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks.",
|
250 |
+
properties={"annotators": "tokenize,ssplit"},
|
251 |
+
)
|
252 |
+
self.assertEqual(expected_output, tokenized_output)
|
253 |
+
|
254 |
+
|
255 |
+
class TestTaggerAPI(TestCase):
|
256 |
+
def test_pos_tagger(self):
|
257 |
+
corenlp_tagger = corenlp.CoreNLPParser(tagtype="pos")
|
258 |
+
|
259 |
+
api_return_value = {
|
260 |
+
"sentences": [
|
261 |
+
{
|
262 |
+
"basicDependencies": [
|
263 |
+
{
|
264 |
+
"dep": "ROOT",
|
265 |
+
"dependent": 1,
|
266 |
+
"dependentGloss": "What",
|
267 |
+
"governor": 0,
|
268 |
+
"governorGloss": "ROOT",
|
269 |
+
},
|
270 |
+
{
|
271 |
+
"dep": "cop",
|
272 |
+
"dependent": 2,
|
273 |
+
"dependentGloss": "is",
|
274 |
+
"governor": 1,
|
275 |
+
"governorGloss": "What",
|
276 |
+
},
|
277 |
+
{
|
278 |
+
"dep": "det",
|
279 |
+
"dependent": 3,
|
280 |
+
"dependentGloss": "the",
|
281 |
+
"governor": 4,
|
282 |
+
"governorGloss": "airspeed",
|
283 |
+
},
|
284 |
+
{
|
285 |
+
"dep": "nsubj",
|
286 |
+
"dependent": 4,
|
287 |
+
"dependentGloss": "airspeed",
|
288 |
+
"governor": 1,
|
289 |
+
"governorGloss": "What",
|
290 |
+
},
|
291 |
+
{
|
292 |
+
"dep": "case",
|
293 |
+
"dependent": 5,
|
294 |
+
"dependentGloss": "of",
|
295 |
+
"governor": 8,
|
296 |
+
"governorGloss": "swallow",
|
297 |
+
},
|
298 |
+
{
|
299 |
+
"dep": "det",
|
300 |
+
"dependent": 6,
|
301 |
+
"dependentGloss": "an",
|
302 |
+
"governor": 8,
|
303 |
+
"governorGloss": "swallow",
|
304 |
+
},
|
305 |
+
{
|
306 |
+
"dep": "compound",
|
307 |
+
"dependent": 7,
|
308 |
+
"dependentGloss": "unladen",
|
309 |
+
"governor": 8,
|
310 |
+
"governorGloss": "swallow",
|
311 |
+
},
|
312 |
+
{
|
313 |
+
"dep": "nmod",
|
314 |
+
"dependent": 8,
|
315 |
+
"dependentGloss": "swallow",
|
316 |
+
"governor": 4,
|
317 |
+
"governorGloss": "airspeed",
|
318 |
+
},
|
319 |
+
{
|
320 |
+
"dep": "punct",
|
321 |
+
"dependent": 9,
|
322 |
+
"dependentGloss": "?",
|
323 |
+
"governor": 1,
|
324 |
+
"governorGloss": "What",
|
325 |
+
},
|
326 |
+
],
|
327 |
+
"enhancedDependencies": [
|
328 |
+
{
|
329 |
+
"dep": "ROOT",
|
330 |
+
"dependent": 1,
|
331 |
+
"dependentGloss": "What",
|
332 |
+
"governor": 0,
|
333 |
+
"governorGloss": "ROOT",
|
334 |
+
},
|
335 |
+
{
|
336 |
+
"dep": "cop",
|
337 |
+
"dependent": 2,
|
338 |
+
"dependentGloss": "is",
|
339 |
+
"governor": 1,
|
340 |
+
"governorGloss": "What",
|
341 |
+
},
|
342 |
+
{
|
343 |
+
"dep": "det",
|
344 |
+
"dependent": 3,
|
345 |
+
"dependentGloss": "the",
|
346 |
+
"governor": 4,
|
347 |
+
"governorGloss": "airspeed",
|
348 |
+
},
|
349 |
+
{
|
350 |
+
"dep": "nsubj",
|
351 |
+
"dependent": 4,
|
352 |
+
"dependentGloss": "airspeed",
|
353 |
+
"governor": 1,
|
354 |
+
"governorGloss": "What",
|
355 |
+
},
|
356 |
+
{
|
357 |
+
"dep": "case",
|
358 |
+
"dependent": 5,
|
359 |
+
"dependentGloss": "of",
|
360 |
+
"governor": 8,
|
361 |
+
"governorGloss": "swallow",
|
362 |
+
},
|
363 |
+
{
|
364 |
+
"dep": "det",
|
365 |
+
"dependent": 6,
|
366 |
+
"dependentGloss": "an",
|
367 |
+
"governor": 8,
|
368 |
+
"governorGloss": "swallow",
|
369 |
+
},
|
370 |
+
{
|
371 |
+
"dep": "compound",
|
372 |
+
"dependent": 7,
|
373 |
+
"dependentGloss": "unladen",
|
374 |
+
"governor": 8,
|
375 |
+
"governorGloss": "swallow",
|
376 |
+
},
|
377 |
+
{
|
378 |
+
"dep": "nmod:of",
|
379 |
+
"dependent": 8,
|
380 |
+
"dependentGloss": "swallow",
|
381 |
+
"governor": 4,
|
382 |
+
"governorGloss": "airspeed",
|
383 |
+
},
|
384 |
+
{
|
385 |
+
"dep": "punct",
|
386 |
+
"dependent": 9,
|
387 |
+
"dependentGloss": "?",
|
388 |
+
"governor": 1,
|
389 |
+
"governorGloss": "What",
|
390 |
+
},
|
391 |
+
],
|
392 |
+
"enhancedPlusPlusDependencies": [
|
393 |
+
{
|
394 |
+
"dep": "ROOT",
|
395 |
+
"dependent": 1,
|
396 |
+
"dependentGloss": "What",
|
397 |
+
"governor": 0,
|
398 |
+
"governorGloss": "ROOT",
|
399 |
+
},
|
400 |
+
{
|
401 |
+
"dep": "cop",
|
402 |
+
"dependent": 2,
|
403 |
+
"dependentGloss": "is",
|
404 |
+
"governor": 1,
|
405 |
+
"governorGloss": "What",
|
406 |
+
},
|
407 |
+
{
|
408 |
+
"dep": "det",
|
409 |
+
"dependent": 3,
|
410 |
+
"dependentGloss": "the",
|
411 |
+
"governor": 4,
|
412 |
+
"governorGloss": "airspeed",
|
413 |
+
},
|
414 |
+
{
|
415 |
+
"dep": "nsubj",
|
416 |
+
"dependent": 4,
|
417 |
+
"dependentGloss": "airspeed",
|
418 |
+
"governor": 1,
|
419 |
+
"governorGloss": "What",
|
420 |
+
},
|
421 |
+
{
|
422 |
+
"dep": "case",
|
423 |
+
"dependent": 5,
|
424 |
+
"dependentGloss": "of",
|
425 |
+
"governor": 8,
|
426 |
+
"governorGloss": "swallow",
|
427 |
+
},
|
428 |
+
{
|
429 |
+
"dep": "det",
|
430 |
+
"dependent": 6,
|
431 |
+
"dependentGloss": "an",
|
432 |
+
"governor": 8,
|
433 |
+
"governorGloss": "swallow",
|
434 |
+
},
|
435 |
+
{
|
436 |
+
"dep": "compound",
|
437 |
+
"dependent": 7,
|
438 |
+
"dependentGloss": "unladen",
|
439 |
+
"governor": 8,
|
440 |
+
"governorGloss": "swallow",
|
441 |
+
},
|
442 |
+
{
|
443 |
+
"dep": "nmod:of",
|
444 |
+
"dependent": 8,
|
445 |
+
"dependentGloss": "swallow",
|
446 |
+
"governor": 4,
|
447 |
+
"governorGloss": "airspeed",
|
448 |
+
},
|
449 |
+
{
|
450 |
+
"dep": "punct",
|
451 |
+
"dependent": 9,
|
452 |
+
"dependentGloss": "?",
|
453 |
+
"governor": 1,
|
454 |
+
"governorGloss": "What",
|
455 |
+
},
|
456 |
+
],
|
457 |
+
"index": 0,
|
458 |
+
"parse": "(ROOT\n (SBARQ\n (WHNP (WP What))\n (SQ (VBZ is)\n (NP\n (NP (DT the) (NN airspeed))\n (PP (IN of)\n (NP (DT an) (NN unladen) (NN swallow)))))\n (. ?)))",
|
459 |
+
"tokens": [
|
460 |
+
{
|
461 |
+
"after": " ",
|
462 |
+
"before": "",
|
463 |
+
"characterOffsetBegin": 0,
|
464 |
+
"characterOffsetEnd": 4,
|
465 |
+
"index": 1,
|
466 |
+
"lemma": "what",
|
467 |
+
"originalText": "What",
|
468 |
+
"pos": "WP",
|
469 |
+
"word": "What",
|
470 |
+
},
|
471 |
+
{
|
472 |
+
"after": " ",
|
473 |
+
"before": " ",
|
474 |
+
"characterOffsetBegin": 5,
|
475 |
+
"characterOffsetEnd": 7,
|
476 |
+
"index": 2,
|
477 |
+
"lemma": "be",
|
478 |
+
"originalText": "is",
|
479 |
+
"pos": "VBZ",
|
480 |
+
"word": "is",
|
481 |
+
},
|
482 |
+
{
|
483 |
+
"after": " ",
|
484 |
+
"before": " ",
|
485 |
+
"characterOffsetBegin": 8,
|
486 |
+
"characterOffsetEnd": 11,
|
487 |
+
"index": 3,
|
488 |
+
"lemma": "the",
|
489 |
+
"originalText": "the",
|
490 |
+
"pos": "DT",
|
491 |
+
"word": "the",
|
492 |
+
},
|
493 |
+
{
|
494 |
+
"after": " ",
|
495 |
+
"before": " ",
|
496 |
+
"characterOffsetBegin": 12,
|
497 |
+
"characterOffsetEnd": 20,
|
498 |
+
"index": 4,
|
499 |
+
"lemma": "airspeed",
|
500 |
+
"originalText": "airspeed",
|
501 |
+
"pos": "NN",
|
502 |
+
"word": "airspeed",
|
503 |
+
},
|
504 |
+
{
|
505 |
+
"after": " ",
|
506 |
+
"before": " ",
|
507 |
+
"characterOffsetBegin": 21,
|
508 |
+
"characterOffsetEnd": 23,
|
509 |
+
"index": 5,
|
510 |
+
"lemma": "of",
|
511 |
+
"originalText": "of",
|
512 |
+
"pos": "IN",
|
513 |
+
"word": "of",
|
514 |
+
},
|
515 |
+
{
|
516 |
+
"after": " ",
|
517 |
+
"before": " ",
|
518 |
+
"characterOffsetBegin": 24,
|
519 |
+
"characterOffsetEnd": 26,
|
520 |
+
"index": 6,
|
521 |
+
"lemma": "a",
|
522 |
+
"originalText": "an",
|
523 |
+
"pos": "DT",
|
524 |
+
"word": "an",
|
525 |
+
},
|
526 |
+
{
|
527 |
+
"after": " ",
|
528 |
+
"before": " ",
|
529 |
+
"characterOffsetBegin": 27,
|
530 |
+
"characterOffsetEnd": 34,
|
531 |
+
"index": 7,
|
532 |
+
"lemma": "unladen",
|
533 |
+
"originalText": "unladen",
|
534 |
+
"pos": "JJ",
|
535 |
+
"word": "unladen",
|
536 |
+
},
|
537 |
+
{
|
538 |
+
"after": " ",
|
539 |
+
"before": " ",
|
540 |
+
"characterOffsetBegin": 35,
|
541 |
+
"characterOffsetEnd": 42,
|
542 |
+
"index": 8,
|
543 |
+
"lemma": "swallow",
|
544 |
+
"originalText": "swallow",
|
545 |
+
"pos": "VB",
|
546 |
+
"word": "swallow",
|
547 |
+
},
|
548 |
+
{
|
549 |
+
"after": "",
|
550 |
+
"before": " ",
|
551 |
+
"characterOffsetBegin": 43,
|
552 |
+
"characterOffsetEnd": 44,
|
553 |
+
"index": 9,
|
554 |
+
"lemma": "?",
|
555 |
+
"originalText": "?",
|
556 |
+
"pos": ".",
|
557 |
+
"word": "?",
|
558 |
+
},
|
559 |
+
],
|
560 |
+
}
|
561 |
+
]
|
562 |
+
}
|
563 |
+
corenlp_tagger.api_call = MagicMock(return_value=api_return_value)
|
564 |
+
|
565 |
+
input_tokens = "What is the airspeed of an unladen swallow ?".split()
|
566 |
+
expected_output = [
|
567 |
+
("What", "WP"),
|
568 |
+
("is", "VBZ"),
|
569 |
+
("the", "DT"),
|
570 |
+
("airspeed", "NN"),
|
571 |
+
("of", "IN"),
|
572 |
+
("an", "DT"),
|
573 |
+
("unladen", "JJ"),
|
574 |
+
("swallow", "VB"),
|
575 |
+
("?", "."),
|
576 |
+
]
|
577 |
+
tagged_output = corenlp_tagger.tag(input_tokens)
|
578 |
+
|
579 |
+
corenlp_tagger.api_call.assert_called_once_with(
|
580 |
+
"What is the airspeed of an unladen swallow ?",
|
581 |
+
properties={
|
582 |
+
"ssplit.isOneSentence": "true",
|
583 |
+
"annotators": "tokenize,ssplit,pos",
|
584 |
+
},
|
585 |
+
)
|
586 |
+
self.assertEqual(expected_output, tagged_output)
|
587 |
+
|
588 |
+
def test_ner_tagger(self):
|
589 |
+
corenlp_tagger = corenlp.CoreNLPParser(tagtype="ner")
|
590 |
+
|
591 |
+
api_return_value = {
|
592 |
+
"sentences": [
|
593 |
+
{
|
594 |
+
"index": 0,
|
595 |
+
"tokens": [
|
596 |
+
{
|
597 |
+
"after": " ",
|
598 |
+
"before": "",
|
599 |
+
"characterOffsetBegin": 0,
|
600 |
+
"characterOffsetEnd": 4,
|
601 |
+
"index": 1,
|
602 |
+
"lemma": "Rami",
|
603 |
+
"ner": "PERSON",
|
604 |
+
"originalText": "Rami",
|
605 |
+
"pos": "NNP",
|
606 |
+
"word": "Rami",
|
607 |
+
},
|
608 |
+
{
|
609 |
+
"after": " ",
|
610 |
+
"before": " ",
|
611 |
+
"characterOffsetBegin": 5,
|
612 |
+
"characterOffsetEnd": 8,
|
613 |
+
"index": 2,
|
614 |
+
"lemma": "Eid",
|
615 |
+
"ner": "PERSON",
|
616 |
+
"originalText": "Eid",
|
617 |
+
"pos": "NNP",
|
618 |
+
"word": "Eid",
|
619 |
+
},
|
620 |
+
{
|
621 |
+
"after": " ",
|
622 |
+
"before": " ",
|
623 |
+
"characterOffsetBegin": 9,
|
624 |
+
"characterOffsetEnd": 11,
|
625 |
+
"index": 3,
|
626 |
+
"lemma": "be",
|
627 |
+
"ner": "O",
|
628 |
+
"originalText": "is",
|
629 |
+
"pos": "VBZ",
|
630 |
+
"word": "is",
|
631 |
+
},
|
632 |
+
{
|
633 |
+
"after": " ",
|
634 |
+
"before": " ",
|
635 |
+
"characterOffsetBegin": 12,
|
636 |
+
"characterOffsetEnd": 20,
|
637 |
+
"index": 4,
|
638 |
+
"lemma": "study",
|
639 |
+
"ner": "O",
|
640 |
+
"originalText": "studying",
|
641 |
+
"pos": "VBG",
|
642 |
+
"word": "studying",
|
643 |
+
},
|
644 |
+
{
|
645 |
+
"after": " ",
|
646 |
+
"before": " ",
|
647 |
+
"characterOffsetBegin": 21,
|
648 |
+
"characterOffsetEnd": 23,
|
649 |
+
"index": 5,
|
650 |
+
"lemma": "at",
|
651 |
+
"ner": "O",
|
652 |
+
"originalText": "at",
|
653 |
+
"pos": "IN",
|
654 |
+
"word": "at",
|
655 |
+
},
|
656 |
+
{
|
657 |
+
"after": " ",
|
658 |
+
"before": " ",
|
659 |
+
"characterOffsetBegin": 24,
|
660 |
+
"characterOffsetEnd": 29,
|
661 |
+
"index": 6,
|
662 |
+
"lemma": "Stony",
|
663 |
+
"ner": "ORGANIZATION",
|
664 |
+
"originalText": "Stony",
|
665 |
+
"pos": "NNP",
|
666 |
+
"word": "Stony",
|
667 |
+
},
|
668 |
+
{
|
669 |
+
"after": " ",
|
670 |
+
"before": " ",
|
671 |
+
"characterOffsetBegin": 30,
|
672 |
+
"characterOffsetEnd": 35,
|
673 |
+
"index": 7,
|
674 |
+
"lemma": "Brook",
|
675 |
+
"ner": "ORGANIZATION",
|
676 |
+
"originalText": "Brook",
|
677 |
+
"pos": "NNP",
|
678 |
+
"word": "Brook",
|
679 |
+
},
|
680 |
+
{
|
681 |
+
"after": " ",
|
682 |
+
"before": " ",
|
683 |
+
"characterOffsetBegin": 36,
|
684 |
+
"characterOffsetEnd": 46,
|
685 |
+
"index": 8,
|
686 |
+
"lemma": "University",
|
687 |
+
"ner": "ORGANIZATION",
|
688 |
+
"originalText": "University",
|
689 |
+
"pos": "NNP",
|
690 |
+
"word": "University",
|
691 |
+
},
|
692 |
+
{
|
693 |
+
"after": " ",
|
694 |
+
"before": " ",
|
695 |
+
"characterOffsetBegin": 47,
|
696 |
+
"characterOffsetEnd": 49,
|
697 |
+
"index": 9,
|
698 |
+
"lemma": "in",
|
699 |
+
"ner": "O",
|
700 |
+
"originalText": "in",
|
701 |
+
"pos": "IN",
|
702 |
+
"word": "in",
|
703 |
+
},
|
704 |
+
{
|
705 |
+
"after": "",
|
706 |
+
"before": " ",
|
707 |
+
"characterOffsetBegin": 50,
|
708 |
+
"characterOffsetEnd": 52,
|
709 |
+
"index": 10,
|
710 |
+
"lemma": "NY",
|
711 |
+
"ner": "O",
|
712 |
+
"originalText": "NY",
|
713 |
+
"pos": "NNP",
|
714 |
+
"word": "NY",
|
715 |
+
},
|
716 |
+
],
|
717 |
+
}
|
718 |
+
]
|
719 |
+
}
|
720 |
+
|
721 |
+
corenlp_tagger.api_call = MagicMock(return_value=api_return_value)
|
722 |
+
|
723 |
+
input_tokens = "Rami Eid is studying at Stony Brook University in NY".split()
|
724 |
+
expected_output = [
|
725 |
+
("Rami", "PERSON"),
|
726 |
+
("Eid", "PERSON"),
|
727 |
+
("is", "O"),
|
728 |
+
("studying", "O"),
|
729 |
+
("at", "O"),
|
730 |
+
("Stony", "ORGANIZATION"),
|
731 |
+
("Brook", "ORGANIZATION"),
|
732 |
+
("University", "ORGANIZATION"),
|
733 |
+
("in", "O"),
|
734 |
+
("NY", "O"),
|
735 |
+
]
|
736 |
+
tagged_output = corenlp_tagger.tag(input_tokens)
|
737 |
+
|
738 |
+
corenlp_tagger.api_call.assert_called_once_with(
|
739 |
+
"Rami Eid is studying at Stony Brook University in NY",
|
740 |
+
properties={
|
741 |
+
"ssplit.isOneSentence": "true",
|
742 |
+
"annotators": "tokenize,ssplit,ner",
|
743 |
+
},
|
744 |
+
)
|
745 |
+
self.assertEqual(expected_output, tagged_output)
|
746 |
+
|
747 |
+
def test_unexpected_tagtype(self):
|
748 |
+
with self.assertRaises(ValueError):
|
749 |
+
corenlp_tagger = corenlp.CoreNLPParser(tagtype="test")
|
750 |
+
|
751 |
+
|
752 |
+
class TestParserAPI(TestCase):
|
753 |
+
def test_parse(self):
|
754 |
+
corenlp_parser = corenlp.CoreNLPParser()
|
755 |
+
|
756 |
+
api_return_value = {
|
757 |
+
"sentences": [
|
758 |
+
{
|
759 |
+
"basicDependencies": [
|
760 |
+
{
|
761 |
+
"dep": "ROOT",
|
762 |
+
"dependent": 4,
|
763 |
+
"dependentGloss": "fox",
|
764 |
+
"governor": 0,
|
765 |
+
"governorGloss": "ROOT",
|
766 |
+
},
|
767 |
+
{
|
768 |
+
"dep": "det",
|
769 |
+
"dependent": 1,
|
770 |
+
"dependentGloss": "The",
|
771 |
+
"governor": 4,
|
772 |
+
"governorGloss": "fox",
|
773 |
+
},
|
774 |
+
{
|
775 |
+
"dep": "amod",
|
776 |
+
"dependent": 2,
|
777 |
+
"dependentGloss": "quick",
|
778 |
+
"governor": 4,
|
779 |
+
"governorGloss": "fox",
|
780 |
+
},
|
781 |
+
{
|
782 |
+
"dep": "amod",
|
783 |
+
"dependent": 3,
|
784 |
+
"dependentGloss": "brown",
|
785 |
+
"governor": 4,
|
786 |
+
"governorGloss": "fox",
|
787 |
+
},
|
788 |
+
{
|
789 |
+
"dep": "dep",
|
790 |
+
"dependent": 5,
|
791 |
+
"dependentGloss": "jumps",
|
792 |
+
"governor": 4,
|
793 |
+
"governorGloss": "fox",
|
794 |
+
},
|
795 |
+
{
|
796 |
+
"dep": "case",
|
797 |
+
"dependent": 6,
|
798 |
+
"dependentGloss": "over",
|
799 |
+
"governor": 9,
|
800 |
+
"governorGloss": "dog",
|
801 |
+
},
|
802 |
+
{
|
803 |
+
"dep": "det",
|
804 |
+
"dependent": 7,
|
805 |
+
"dependentGloss": "the",
|
806 |
+
"governor": 9,
|
807 |
+
"governorGloss": "dog",
|
808 |
+
},
|
809 |
+
{
|
810 |
+
"dep": "amod",
|
811 |
+
"dependent": 8,
|
812 |
+
"dependentGloss": "lazy",
|
813 |
+
"governor": 9,
|
814 |
+
"governorGloss": "dog",
|
815 |
+
},
|
816 |
+
{
|
817 |
+
"dep": "nmod",
|
818 |
+
"dependent": 9,
|
819 |
+
"dependentGloss": "dog",
|
820 |
+
"governor": 5,
|
821 |
+
"governorGloss": "jumps",
|
822 |
+
},
|
823 |
+
],
|
824 |
+
"enhancedDependencies": [
|
825 |
+
{
|
826 |
+
"dep": "ROOT",
|
827 |
+
"dependent": 4,
|
828 |
+
"dependentGloss": "fox",
|
829 |
+
"governor": 0,
|
830 |
+
"governorGloss": "ROOT",
|
831 |
+
},
|
832 |
+
{
|
833 |
+
"dep": "det",
|
834 |
+
"dependent": 1,
|
835 |
+
"dependentGloss": "The",
|
836 |
+
"governor": 4,
|
837 |
+
"governorGloss": "fox",
|
838 |
+
},
|
839 |
+
{
|
840 |
+
"dep": "amod",
|
841 |
+
"dependent": 2,
|
842 |
+
"dependentGloss": "quick",
|
843 |
+
"governor": 4,
|
844 |
+
"governorGloss": "fox",
|
845 |
+
},
|
846 |
+
{
|
847 |
+
"dep": "amod",
|
848 |
+
"dependent": 3,
|
849 |
+
"dependentGloss": "brown",
|
850 |
+
"governor": 4,
|
851 |
+
"governorGloss": "fox",
|
852 |
+
},
|
853 |
+
{
|
854 |
+
"dep": "dep",
|
855 |
+
"dependent": 5,
|
856 |
+
"dependentGloss": "jumps",
|
857 |
+
"governor": 4,
|
858 |
+
"governorGloss": "fox",
|
859 |
+
},
|
860 |
+
{
|
861 |
+
"dep": "case",
|
862 |
+
"dependent": 6,
|
863 |
+
"dependentGloss": "over",
|
864 |
+
"governor": 9,
|
865 |
+
"governorGloss": "dog",
|
866 |
+
},
|
867 |
+
{
|
868 |
+
"dep": "det",
|
869 |
+
"dependent": 7,
|
870 |
+
"dependentGloss": "the",
|
871 |
+
"governor": 9,
|
872 |
+
"governorGloss": "dog",
|
873 |
+
},
|
874 |
+
{
|
875 |
+
"dep": "amod",
|
876 |
+
"dependent": 8,
|
877 |
+
"dependentGloss": "lazy",
|
878 |
+
"governor": 9,
|
879 |
+
"governorGloss": "dog",
|
880 |
+
},
|
881 |
+
{
|
882 |
+
"dep": "nmod:over",
|
883 |
+
"dependent": 9,
|
884 |
+
"dependentGloss": "dog",
|
885 |
+
"governor": 5,
|
886 |
+
"governorGloss": "jumps",
|
887 |
+
},
|
888 |
+
],
|
889 |
+
"enhancedPlusPlusDependencies": [
|
890 |
+
{
|
891 |
+
"dep": "ROOT",
|
892 |
+
"dependent": 4,
|
893 |
+
"dependentGloss": "fox",
|
894 |
+
"governor": 0,
|
895 |
+
"governorGloss": "ROOT",
|
896 |
+
},
|
897 |
+
{
|
898 |
+
"dep": "det",
|
899 |
+
"dependent": 1,
|
900 |
+
"dependentGloss": "The",
|
901 |
+
"governor": 4,
|
902 |
+
"governorGloss": "fox",
|
903 |
+
},
|
904 |
+
{
|
905 |
+
"dep": "amod",
|
906 |
+
"dependent": 2,
|
907 |
+
"dependentGloss": "quick",
|
908 |
+
"governor": 4,
|
909 |
+
"governorGloss": "fox",
|
910 |
+
},
|
911 |
+
{
|
912 |
+
"dep": "amod",
|
913 |
+
"dependent": 3,
|
914 |
+
"dependentGloss": "brown",
|
915 |
+
"governor": 4,
|
916 |
+
"governorGloss": "fox",
|
917 |
+
},
|
918 |
+
{
|
919 |
+
"dep": "dep",
|
920 |
+
"dependent": 5,
|
921 |
+
"dependentGloss": "jumps",
|
922 |
+
"governor": 4,
|
923 |
+
"governorGloss": "fox",
|
924 |
+
},
|
925 |
+
{
|
926 |
+
"dep": "case",
|
927 |
+
"dependent": 6,
|
928 |
+
"dependentGloss": "over",
|
929 |
+
"governor": 9,
|
930 |
+
"governorGloss": "dog",
|
931 |
+
},
|
932 |
+
{
|
933 |
+
"dep": "det",
|
934 |
+
"dependent": 7,
|
935 |
+
"dependentGloss": "the",
|
936 |
+
"governor": 9,
|
937 |
+
"governorGloss": "dog",
|
938 |
+
},
|
939 |
+
{
|
940 |
+
"dep": "amod",
|
941 |
+
"dependent": 8,
|
942 |
+
"dependentGloss": "lazy",
|
943 |
+
"governor": 9,
|
944 |
+
"governorGloss": "dog",
|
945 |
+
},
|
946 |
+
{
|
947 |
+
"dep": "nmod:over",
|
948 |
+
"dependent": 9,
|
949 |
+
"dependentGloss": "dog",
|
950 |
+
"governor": 5,
|
951 |
+
"governorGloss": "jumps",
|
952 |
+
},
|
953 |
+
],
|
954 |
+
"index": 0,
|
955 |
+
"parse": "(ROOT\n (NP\n (NP (DT The) (JJ quick) (JJ brown) (NN fox))\n (NP\n (NP (NNS jumps))\n (PP (IN over)\n (NP (DT the) (JJ lazy) (NN dog))))))",
|
956 |
+
"tokens": [
|
957 |
+
{
|
958 |
+
"after": " ",
|
959 |
+
"before": "",
|
960 |
+
"characterOffsetBegin": 0,
|
961 |
+
"characterOffsetEnd": 3,
|
962 |
+
"index": 1,
|
963 |
+
"lemma": "the",
|
964 |
+
"originalText": "The",
|
965 |
+
"pos": "DT",
|
966 |
+
"word": "The",
|
967 |
+
},
|
968 |
+
{
|
969 |
+
"after": " ",
|
970 |
+
"before": " ",
|
971 |
+
"characterOffsetBegin": 4,
|
972 |
+
"characterOffsetEnd": 9,
|
973 |
+
"index": 2,
|
974 |
+
"lemma": "quick",
|
975 |
+
"originalText": "quick",
|
976 |
+
"pos": "JJ",
|
977 |
+
"word": "quick",
|
978 |
+
},
|
979 |
+
{
|
980 |
+
"after": " ",
|
981 |
+
"before": " ",
|
982 |
+
"characterOffsetBegin": 10,
|
983 |
+
"characterOffsetEnd": 15,
|
984 |
+
"index": 3,
|
985 |
+
"lemma": "brown",
|
986 |
+
"originalText": "brown",
|
987 |
+
"pos": "JJ",
|
988 |
+
"word": "brown",
|
989 |
+
},
|
990 |
+
{
|
991 |
+
"after": " ",
|
992 |
+
"before": " ",
|
993 |
+
"characterOffsetBegin": 16,
|
994 |
+
"characterOffsetEnd": 19,
|
995 |
+
"index": 4,
|
996 |
+
"lemma": "fox",
|
997 |
+
"originalText": "fox",
|
998 |
+
"pos": "NN",
|
999 |
+
"word": "fox",
|
1000 |
+
},
|
1001 |
+
{
|
1002 |
+
"after": " ",
|
1003 |
+
"before": " ",
|
1004 |
+
"characterOffsetBegin": 20,
|
1005 |
+
"characterOffsetEnd": 25,
|
1006 |
+
"index": 5,
|
1007 |
+
"lemma": "jump",
|
1008 |
+
"originalText": "jumps",
|
1009 |
+
"pos": "VBZ",
|
1010 |
+
"word": "jumps",
|
1011 |
+
},
|
1012 |
+
{
|
1013 |
+
"after": " ",
|
1014 |
+
"before": " ",
|
1015 |
+
"characterOffsetBegin": 26,
|
1016 |
+
"characterOffsetEnd": 30,
|
1017 |
+
"index": 6,
|
1018 |
+
"lemma": "over",
|
1019 |
+
"originalText": "over",
|
1020 |
+
"pos": "IN",
|
1021 |
+
"word": "over",
|
1022 |
+
},
|
1023 |
+
{
|
1024 |
+
"after": " ",
|
1025 |
+
"before": " ",
|
1026 |
+
"characterOffsetBegin": 31,
|
1027 |
+
"characterOffsetEnd": 34,
|
1028 |
+
"index": 7,
|
1029 |
+
"lemma": "the",
|
1030 |
+
"originalText": "the",
|
1031 |
+
"pos": "DT",
|
1032 |
+
"word": "the",
|
1033 |
+
},
|
1034 |
+
{
|
1035 |
+
"after": " ",
|
1036 |
+
"before": " ",
|
1037 |
+
"characterOffsetBegin": 35,
|
1038 |
+
"characterOffsetEnd": 39,
|
1039 |
+
"index": 8,
|
1040 |
+
"lemma": "lazy",
|
1041 |
+
"originalText": "lazy",
|
1042 |
+
"pos": "JJ",
|
1043 |
+
"word": "lazy",
|
1044 |
+
},
|
1045 |
+
{
|
1046 |
+
"after": "",
|
1047 |
+
"before": " ",
|
1048 |
+
"characterOffsetBegin": 40,
|
1049 |
+
"characterOffsetEnd": 43,
|
1050 |
+
"index": 9,
|
1051 |
+
"lemma": "dog",
|
1052 |
+
"originalText": "dog",
|
1053 |
+
"pos": "NN",
|
1054 |
+
"word": "dog",
|
1055 |
+
},
|
1056 |
+
],
|
1057 |
+
}
|
1058 |
+
]
|
1059 |
+
}
|
1060 |
+
|
1061 |
+
corenlp_parser.api_call = MagicMock(return_value=api_return_value)
|
1062 |
+
|
1063 |
+
input_string = "The quick brown fox jumps over the lazy dog".split()
|
1064 |
+
expected_output = Tree(
|
1065 |
+
"ROOT",
|
1066 |
+
[
|
1067 |
+
Tree(
|
1068 |
+
"NP",
|
1069 |
+
[
|
1070 |
+
Tree(
|
1071 |
+
"NP",
|
1072 |
+
[
|
1073 |
+
Tree("DT", ["The"]),
|
1074 |
+
Tree("JJ", ["quick"]),
|
1075 |
+
Tree("JJ", ["brown"]),
|
1076 |
+
Tree("NN", ["fox"]),
|
1077 |
+
],
|
1078 |
+
),
|
1079 |
+
Tree(
|
1080 |
+
"NP",
|
1081 |
+
[
|
1082 |
+
Tree("NP", [Tree("NNS", ["jumps"])]),
|
1083 |
+
Tree(
|
1084 |
+
"PP",
|
1085 |
+
[
|
1086 |
+
Tree("IN", ["over"]),
|
1087 |
+
Tree(
|
1088 |
+
"NP",
|
1089 |
+
[
|
1090 |
+
Tree("DT", ["the"]),
|
1091 |
+
Tree("JJ", ["lazy"]),
|
1092 |
+
Tree("NN", ["dog"]),
|
1093 |
+
],
|
1094 |
+
),
|
1095 |
+
],
|
1096 |
+
),
|
1097 |
+
],
|
1098 |
+
),
|
1099 |
+
],
|
1100 |
+
)
|
1101 |
+
],
|
1102 |
+
)
|
1103 |
+
|
1104 |
+
parsed_data = next(corenlp_parser.parse(input_string))
|
1105 |
+
|
1106 |
+
corenlp_parser.api_call.assert_called_once_with(
|
1107 |
+
"The quick brown fox jumps over the lazy dog",
|
1108 |
+
properties={"ssplit.eolonly": "true"},
|
1109 |
+
)
|
1110 |
+
self.assertEqual(expected_output, parsed_data)
|
1111 |
+
|
1112 |
+
def test_dependency_parser(self):
|
1113 |
+
corenlp_parser = corenlp.CoreNLPDependencyParser()
|
1114 |
+
|
1115 |
+
api_return_value = {
|
1116 |
+
"sentences": [
|
1117 |
+
{
|
1118 |
+
"basicDependencies": [
|
1119 |
+
{
|
1120 |
+
"dep": "ROOT",
|
1121 |
+
"dependent": 5,
|
1122 |
+
"dependentGloss": "jumps",
|
1123 |
+
"governor": 0,
|
1124 |
+
"governorGloss": "ROOT",
|
1125 |
+
},
|
1126 |
+
{
|
1127 |
+
"dep": "det",
|
1128 |
+
"dependent": 1,
|
1129 |
+
"dependentGloss": "The",
|
1130 |
+
"governor": 4,
|
1131 |
+
"governorGloss": "fox",
|
1132 |
+
},
|
1133 |
+
{
|
1134 |
+
"dep": "amod",
|
1135 |
+
"dependent": 2,
|
1136 |
+
"dependentGloss": "quick",
|
1137 |
+
"governor": 4,
|
1138 |
+
"governorGloss": "fox",
|
1139 |
+
},
|
1140 |
+
{
|
1141 |
+
"dep": "amod",
|
1142 |
+
"dependent": 3,
|
1143 |
+
"dependentGloss": "brown",
|
1144 |
+
"governor": 4,
|
1145 |
+
"governorGloss": "fox",
|
1146 |
+
},
|
1147 |
+
{
|
1148 |
+
"dep": "nsubj",
|
1149 |
+
"dependent": 4,
|
1150 |
+
"dependentGloss": "fox",
|
1151 |
+
"governor": 5,
|
1152 |
+
"governorGloss": "jumps",
|
1153 |
+
},
|
1154 |
+
{
|
1155 |
+
"dep": "case",
|
1156 |
+
"dependent": 6,
|
1157 |
+
"dependentGloss": "over",
|
1158 |
+
"governor": 9,
|
1159 |
+
"governorGloss": "dog",
|
1160 |
+
},
|
1161 |
+
{
|
1162 |
+
"dep": "det",
|
1163 |
+
"dependent": 7,
|
1164 |
+
"dependentGloss": "the",
|
1165 |
+
"governor": 9,
|
1166 |
+
"governorGloss": "dog",
|
1167 |
+
},
|
1168 |
+
{
|
1169 |
+
"dep": "amod",
|
1170 |
+
"dependent": 8,
|
1171 |
+
"dependentGloss": "lazy",
|
1172 |
+
"governor": 9,
|
1173 |
+
"governorGloss": "dog",
|
1174 |
+
},
|
1175 |
+
{
|
1176 |
+
"dep": "nmod",
|
1177 |
+
"dependent": 9,
|
1178 |
+
"dependentGloss": "dog",
|
1179 |
+
"governor": 5,
|
1180 |
+
"governorGloss": "jumps",
|
1181 |
+
},
|
1182 |
+
],
|
1183 |
+
"enhancedDependencies": [
|
1184 |
+
{
|
1185 |
+
"dep": "ROOT",
|
1186 |
+
"dependent": 5,
|
1187 |
+
"dependentGloss": "jumps",
|
1188 |
+
"governor": 0,
|
1189 |
+
"governorGloss": "ROOT",
|
1190 |
+
},
|
1191 |
+
{
|
1192 |
+
"dep": "det",
|
1193 |
+
"dependent": 1,
|
1194 |
+
"dependentGloss": "The",
|
1195 |
+
"governor": 4,
|
1196 |
+
"governorGloss": "fox",
|
1197 |
+
},
|
1198 |
+
{
|
1199 |
+
"dep": "amod",
|
1200 |
+
"dependent": 2,
|
1201 |
+
"dependentGloss": "quick",
|
1202 |
+
"governor": 4,
|
1203 |
+
"governorGloss": "fox",
|
1204 |
+
},
|
1205 |
+
{
|
1206 |
+
"dep": "amod",
|
1207 |
+
"dependent": 3,
|
1208 |
+
"dependentGloss": "brown",
|
1209 |
+
"governor": 4,
|
1210 |
+
"governorGloss": "fox",
|
1211 |
+
},
|
1212 |
+
{
|
1213 |
+
"dep": "nsubj",
|
1214 |
+
"dependent": 4,
|
1215 |
+
"dependentGloss": "fox",
|
1216 |
+
"governor": 5,
|
1217 |
+
"governorGloss": "jumps",
|
1218 |
+
},
|
1219 |
+
{
|
1220 |
+
"dep": "case",
|
1221 |
+
"dependent": 6,
|
1222 |
+
"dependentGloss": "over",
|
1223 |
+
"governor": 9,
|
1224 |
+
"governorGloss": "dog",
|
1225 |
+
},
|
1226 |
+
{
|
1227 |
+
"dep": "det",
|
1228 |
+
"dependent": 7,
|
1229 |
+
"dependentGloss": "the",
|
1230 |
+
"governor": 9,
|
1231 |
+
"governorGloss": "dog",
|
1232 |
+
},
|
1233 |
+
{
|
1234 |
+
"dep": "amod",
|
1235 |
+
"dependent": 8,
|
1236 |
+
"dependentGloss": "lazy",
|
1237 |
+
"governor": 9,
|
1238 |
+
"governorGloss": "dog",
|
1239 |
+
},
|
1240 |
+
{
|
1241 |
+
"dep": "nmod:over",
|
1242 |
+
"dependent": 9,
|
1243 |
+
"dependentGloss": "dog",
|
1244 |
+
"governor": 5,
|
1245 |
+
"governorGloss": "jumps",
|
1246 |
+
},
|
1247 |
+
],
|
1248 |
+
"enhancedPlusPlusDependencies": [
|
1249 |
+
{
|
1250 |
+
"dep": "ROOT",
|
1251 |
+
"dependent": 5,
|
1252 |
+
"dependentGloss": "jumps",
|
1253 |
+
"governor": 0,
|
1254 |
+
"governorGloss": "ROOT",
|
1255 |
+
},
|
1256 |
+
{
|
1257 |
+
"dep": "det",
|
1258 |
+
"dependent": 1,
|
1259 |
+
"dependentGloss": "The",
|
1260 |
+
"governor": 4,
|
1261 |
+
"governorGloss": "fox",
|
1262 |
+
},
|
1263 |
+
{
|
1264 |
+
"dep": "amod",
|
1265 |
+
"dependent": 2,
|
1266 |
+
"dependentGloss": "quick",
|
1267 |
+
"governor": 4,
|
1268 |
+
"governorGloss": "fox",
|
1269 |
+
},
|
1270 |
+
{
|
1271 |
+
"dep": "amod",
|
1272 |
+
"dependent": 3,
|
1273 |
+
"dependentGloss": "brown",
|
1274 |
+
"governor": 4,
|
1275 |
+
"governorGloss": "fox",
|
1276 |
+
},
|
1277 |
+
{
|
1278 |
+
"dep": "nsubj",
|
1279 |
+
"dependent": 4,
|
1280 |
+
"dependentGloss": "fox",
|
1281 |
+
"governor": 5,
|
1282 |
+
"governorGloss": "jumps",
|
1283 |
+
},
|
1284 |
+
{
|
1285 |
+
"dep": "case",
|
1286 |
+
"dependent": 6,
|
1287 |
+
"dependentGloss": "over",
|
1288 |
+
"governor": 9,
|
1289 |
+
"governorGloss": "dog",
|
1290 |
+
},
|
1291 |
+
{
|
1292 |
+
"dep": "det",
|
1293 |
+
"dependent": 7,
|
1294 |
+
"dependentGloss": "the",
|
1295 |
+
"governor": 9,
|
1296 |
+
"governorGloss": "dog",
|
1297 |
+
},
|
1298 |
+
{
|
1299 |
+
"dep": "amod",
|
1300 |
+
"dependent": 8,
|
1301 |
+
"dependentGloss": "lazy",
|
1302 |
+
"governor": 9,
|
1303 |
+
"governorGloss": "dog",
|
1304 |
+
},
|
1305 |
+
{
|
1306 |
+
"dep": "nmod:over",
|
1307 |
+
"dependent": 9,
|
1308 |
+
"dependentGloss": "dog",
|
1309 |
+
"governor": 5,
|
1310 |
+
"governorGloss": "jumps",
|
1311 |
+
},
|
1312 |
+
],
|
1313 |
+
"index": 0,
|
1314 |
+
"tokens": [
|
1315 |
+
{
|
1316 |
+
"after": " ",
|
1317 |
+
"before": "",
|
1318 |
+
"characterOffsetBegin": 0,
|
1319 |
+
"characterOffsetEnd": 3,
|
1320 |
+
"index": 1,
|
1321 |
+
"lemma": "the",
|
1322 |
+
"originalText": "The",
|
1323 |
+
"pos": "DT",
|
1324 |
+
"word": "The",
|
1325 |
+
},
|
1326 |
+
{
|
1327 |
+
"after": " ",
|
1328 |
+
"before": " ",
|
1329 |
+
"characterOffsetBegin": 4,
|
1330 |
+
"characterOffsetEnd": 9,
|
1331 |
+
"index": 2,
|
1332 |
+
"lemma": "quick",
|
1333 |
+
"originalText": "quick",
|
1334 |
+
"pos": "JJ",
|
1335 |
+
"word": "quick",
|
1336 |
+
},
|
1337 |
+
{
|
1338 |
+
"after": " ",
|
1339 |
+
"before": " ",
|
1340 |
+
"characterOffsetBegin": 10,
|
1341 |
+
"characterOffsetEnd": 15,
|
1342 |
+
"index": 3,
|
1343 |
+
"lemma": "brown",
|
1344 |
+
"originalText": "brown",
|
1345 |
+
"pos": "JJ",
|
1346 |
+
"word": "brown",
|
1347 |
+
},
|
1348 |
+
{
|
1349 |
+
"after": " ",
|
1350 |
+
"before": " ",
|
1351 |
+
"characterOffsetBegin": 16,
|
1352 |
+
"characterOffsetEnd": 19,
|
1353 |
+
"index": 4,
|
1354 |
+
"lemma": "fox",
|
1355 |
+
"originalText": "fox",
|
1356 |
+
"pos": "NN",
|
1357 |
+
"word": "fox",
|
1358 |
+
},
|
1359 |
+
{
|
1360 |
+
"after": " ",
|
1361 |
+
"before": " ",
|
1362 |
+
"characterOffsetBegin": 20,
|
1363 |
+
"characterOffsetEnd": 25,
|
1364 |
+
"index": 5,
|
1365 |
+
"lemma": "jump",
|
1366 |
+
"originalText": "jumps",
|
1367 |
+
"pos": "VBZ",
|
1368 |
+
"word": "jumps",
|
1369 |
+
},
|
1370 |
+
{
|
1371 |
+
"after": " ",
|
1372 |
+
"before": " ",
|
1373 |
+
"characterOffsetBegin": 26,
|
1374 |
+
"characterOffsetEnd": 30,
|
1375 |
+
"index": 6,
|
1376 |
+
"lemma": "over",
|
1377 |
+
"originalText": "over",
|
1378 |
+
"pos": "IN",
|
1379 |
+
"word": "over",
|
1380 |
+
},
|
1381 |
+
{
|
1382 |
+
"after": " ",
|
1383 |
+
"before": " ",
|
1384 |
+
"characterOffsetBegin": 31,
|
1385 |
+
"characterOffsetEnd": 34,
|
1386 |
+
"index": 7,
|
1387 |
+
"lemma": "the",
|
1388 |
+
"originalText": "the",
|
1389 |
+
"pos": "DT",
|
1390 |
+
"word": "the",
|
1391 |
+
},
|
1392 |
+
{
|
1393 |
+
"after": " ",
|
1394 |
+
"before": " ",
|
1395 |
+
"characterOffsetBegin": 35,
|
1396 |
+
"characterOffsetEnd": 39,
|
1397 |
+
"index": 8,
|
1398 |
+
"lemma": "lazy",
|
1399 |
+
"originalText": "lazy",
|
1400 |
+
"pos": "JJ",
|
1401 |
+
"word": "lazy",
|
1402 |
+
},
|
1403 |
+
{
|
1404 |
+
"after": "",
|
1405 |
+
"before": " ",
|
1406 |
+
"characterOffsetBegin": 40,
|
1407 |
+
"characterOffsetEnd": 43,
|
1408 |
+
"index": 9,
|
1409 |
+
"lemma": "dog",
|
1410 |
+
"originalText": "dog",
|
1411 |
+
"pos": "NN",
|
1412 |
+
"word": "dog",
|
1413 |
+
},
|
1414 |
+
],
|
1415 |
+
}
|
1416 |
+
]
|
1417 |
+
}
|
1418 |
+
|
1419 |
+
corenlp_parser.api_call = MagicMock(return_value=api_return_value)
|
1420 |
+
|
1421 |
+
input_string = "The quick brown fox jumps over the lazy dog".split()
|
1422 |
+
expected_output = Tree(
|
1423 |
+
"jumps",
|
1424 |
+
[
|
1425 |
+
Tree("fox", ["The", "quick", "brown"]),
|
1426 |
+
Tree("dog", ["over", "the", "lazy"]),
|
1427 |
+
],
|
1428 |
+
)
|
1429 |
+
|
1430 |
+
parsed_data = next(corenlp_parser.parse(input_string))
|
1431 |
+
|
1432 |
+
corenlp_parser.api_call.assert_called_once_with(
|
1433 |
+
"The quick brown fox jumps over the lazy dog",
|
1434 |
+
properties={"ssplit.eolonly": "true"},
|
1435 |
+
)
|
1436 |
+
self.assertEqual(expected_output, parsed_data.tree())
|
venv/lib/python3.10/site-packages/nltk/test/unit/test_corpora.py
ADDED
@@ -0,0 +1,274 @@
1 |
+
import unittest
|
2 |
+
|
3 |
+
import pytest
|
4 |
+
|
5 |
+
from nltk.corpus import ( # mwa_ppdb
|
6 |
+
cess_cat,
|
7 |
+
cess_esp,
|
8 |
+
conll2007,
|
9 |
+
floresta,
|
10 |
+
indian,
|
11 |
+
ptb,
|
12 |
+
sinica_treebank,
|
13 |
+
udhr,
|
14 |
+
)
|
15 |
+
from nltk.tree import Tree
|
16 |
+
|
17 |
+
|
18 |
+
class TestUdhr(unittest.TestCase):
|
19 |
+
def test_words(self):
|
20 |
+
for name in udhr.fileids():
|
21 |
+
words = list(udhr.words(name))
|
22 |
+
self.assertTrue(words)
|
23 |
+
|
24 |
+
def test_raw_unicode(self):
|
25 |
+
for name in udhr.fileids():
|
26 |
+
txt = udhr.raw(name)
|
27 |
+
assert not isinstance(txt, bytes), name
|
28 |
+
|
29 |
+
def test_polish_encoding(self):
|
30 |
+
text_pl = udhr.raw("Polish-Latin2")[:164]
|
31 |
+
text_ppl = udhr.raw("Polish_Polski-Latin2")[:164]
|
32 |
+
expected = """POWSZECHNA DEKLARACJA PRAW CZŁOWIEKA
|
33 |
+
[Preamble]
|
34 |
+
Trzecia Sesja Ogólnego Zgromadzenia ONZ, obradująca w Paryżu, \
|
35 |
+
uchwaliła 10 grudnia 1948 roku jednomyślnie Powszechną"""
|
36 |
+
assert text_pl == expected, "Polish-Latin2"
|
37 |
+
assert text_ppl == expected, "Polish_Polski-Latin2"
|
38 |
+
|
39 |
+
|
40 |
+
class TestIndian(unittest.TestCase):
|
41 |
+
def test_words(self):
|
42 |
+
words = indian.words()[:3]
|
43 |
+
self.assertEqual(words, ["মহিষের", "সন্তান", ":"])
|
44 |
+
|
45 |
+
def test_tagged_words(self):
|
46 |
+
tagged_words = indian.tagged_words()[:3]
|
47 |
+
self.assertEqual(
|
48 |
+
tagged_words, [("মহিষের", "NN"), ("সন্তান", "NN"), (":", "SYM")]
|
49 |
+
)
|
50 |
+
|
51 |
+
|
52 |
+
class TestCess(unittest.TestCase):
|
53 |
+
def test_catalan(self):
|
54 |
+
words = cess_cat.words()[:15]
|
55 |
+
txt = "El Tribunal_Suprem -Fpa- TS -Fpt- ha confirmat la condemna a quatre anys d' inhabilitació especial"
|
56 |
+
self.assertEqual(words, txt.split())
|
57 |
+
self.assertEqual(cess_cat.tagged_sents()[0][34][0], "càrrecs")
|
58 |
+
|
59 |
+
def test_esp(self):
|
60 |
+
words = cess_esp.words()[:15]
|
61 |
+
txt = "El grupo estatal Electricité_de_France -Fpa- EDF -Fpt- anunció hoy , jueves , la compra del"
|
62 |
+
self.assertEqual(words, txt.split())
|
63 |
+
self.assertEqual(cess_esp.words()[115], "años")
|
64 |
+
|
65 |
+
|
66 |
+
class TestFloresta(unittest.TestCase):
|
67 |
+
def test_words(self):
|
68 |
+
words = floresta.words()[:10]
|
69 |
+
txt = "Um revivalismo refrescante O 7_e_Meio é um ex-libris de a"
|
70 |
+
self.assertEqual(words, txt.split())
|
71 |
+
|
72 |
+
|
73 |
+
class TestSinicaTreebank(unittest.TestCase):
|
74 |
+
def test_sents(self):
|
75 |
+
first_3_sents = sinica_treebank.sents()[:3]
|
76 |
+
self.assertEqual(
|
77 |
+
first_3_sents, [["一"], ["友情"], ["嘉珍", "和", "我", "住在", "同一條", "巷子"]]
|
78 |
+
)
|
79 |
+
|
80 |
+
def test_parsed_sents(self):
|
81 |
+
parsed_sents = sinica_treebank.parsed_sents()[25]
|
82 |
+
self.assertEqual(
|
83 |
+
parsed_sents,
|
84 |
+
Tree(
|
85 |
+
"S",
|
86 |
+
[
|
87 |
+
Tree("NP", [Tree("Nba", ["嘉珍"])]),
|
88 |
+
Tree("V‧地", [Tree("VA11", ["不停"]), Tree("DE", ["的"])]),
|
89 |
+
Tree("VA4", ["哭泣"]),
|
90 |
+
],
|
91 |
+
),
|
92 |
+
)
|
93 |
+
|
94 |
+
|
95 |
+
class TestCoNLL2007(unittest.TestCase):
|
96 |
+
# Reading the CoNLL 2007 Dependency Treebanks
|
97 |
+
|
98 |
+
def test_sents(self):
|
99 |
+
sents = conll2007.sents("esp.train")[0]
|
100 |
+
self.assertEqual(
|
101 |
+
sents[:6], ["El", "aumento", "del", "índice", "de", "desempleo"]
|
102 |
+
)
|
103 |
+
|
104 |
+
def test_parsed_sents(self):
|
105 |
+
|
106 |
+
parsed_sents = conll2007.parsed_sents("esp.train")[0]
|
107 |
+
|
108 |
+
self.assertEqual(
|
109 |
+
parsed_sents.tree(),
|
110 |
+
Tree(
|
111 |
+
"fortaleció",
|
112 |
+
[
|
113 |
+
Tree(
|
114 |
+
"aumento",
|
115 |
+
[
|
116 |
+
"El",
|
117 |
+
Tree(
|
118 |
+
"del",
|
119 |
+
[
|
120 |
+
Tree(
|
121 |
+
"índice",
|
122 |
+
[
|
123 |
+
Tree(
|
124 |
+
"de",
|
125 |
+
[Tree("desempleo", ["estadounidense"])],
|
126 |
+
)
|
127 |
+
],
|
128 |
+
)
|
129 |
+
],
|
130 |
+
),
|
131 |
+
],
|
132 |
+
),
|
133 |
+
"hoy",
|
134 |
+
"considerablemente",
|
135 |
+
Tree(
|
136 |
+
"al",
|
137 |
+
[
|
138 |
+
Tree(
|
139 |
+
"euro",
|
140 |
+
[
|
141 |
+
Tree(
|
142 |
+
"cotizaba",
|
143 |
+
[
|
144 |
+
",",
|
145 |
+
"que",
|
146 |
+
Tree("a", [Tree("15.35", ["las", "GMT"])]),
|
147 |
+
"se",
|
148 |
+
Tree(
|
149 |
+
"en",
|
150 |
+
[
|
151 |
+
Tree(
|
152 |
+
"mercado",
|
153 |
+
[
|
154 |
+
"el",
|
155 |
+
Tree("de", ["divisas"]),
|
156 |
+
Tree("de", ["Fráncfort"]),
|
157 |
+
],
|
158 |
+
)
|
159 |
+
],
|
160 |
+
),
|
161 |
+
Tree("a", ["0,9452_dólares"]),
|
162 |
+
Tree(
|
163 |
+
"frente_a",
|
164 |
+
[
|
165 |
+
",",
|
166 |
+
Tree(
|
167 |
+
"0,9349_dólares",
|
168 |
+
[
|
169 |
+
"los",
|
170 |
+
Tree(
|
171 |
+
"de",
|
172 |
+
[
|
173 |
+
Tree(
|
174 |
+
"mañana",
|
175 |
+
["esta"],
|
176 |
+
)
|
177 |
+
],
|
178 |
+
),
|
179 |
+
],
|
180 |
+
),
|
181 |
+
],
|
182 |
+
),
|
183 |
+
],
|
184 |
+
)
|
185 |
+
],
|
186 |
+
)
|
187 |
+
],
|
188 |
+
),
|
189 |
+
".",
|
190 |
+
],
|
191 |
+
),
|
192 |
+
)
|
193 |
+
|
194 |
+
|
195 |
+
@pytest.mark.skipif(
|
196 |
+
not ptb.fileids(),
|
197 |
+
reason="A full installation of the Penn Treebank is not available",
|
198 |
+
)
|
199 |
+
class TestPTB(unittest.TestCase):
|
200 |
+
def test_fileids(self):
|
201 |
+
self.assertEqual(
|
202 |
+
ptb.fileids()[:4],
|
203 |
+
[
|
204 |
+
"BROWN/CF/CF01.MRG",
|
205 |
+
"BROWN/CF/CF02.MRG",
|
206 |
+
"BROWN/CF/CF03.MRG",
|
207 |
+
"BROWN/CF/CF04.MRG",
|
208 |
+
],
|
209 |
+
)
|
210 |
+
|
211 |
+
def test_words(self):
|
212 |
+
self.assertEqual(
|
213 |
+
ptb.words("WSJ/00/WSJ_0003.MRG")[:7],
|
214 |
+
["A", "form", "of", "asbestos", "once", "used", "*"],
|
215 |
+
)
|
216 |
+
|
217 |
+
def test_tagged_words(self):
|
218 |
+
self.assertEqual(
|
219 |
+
ptb.tagged_words("WSJ/00/WSJ_0003.MRG")[:3],
|
220 |
+
[("A", "DT"), ("form", "NN"), ("of", "IN")],
|
221 |
+
)
|
222 |
+
|
223 |
+
def test_categories(self):
|
224 |
+
self.assertEqual(
|
225 |
+
ptb.categories(),
|
226 |
+
[
|
227 |
+
"adventure",
|
228 |
+
"belles_lettres",
|
229 |
+
"fiction",
|
230 |
+
"humor",
|
231 |
+
"lore",
|
232 |
+
"mystery",
|
233 |
+
"news",
|
234 |
+
"romance",
|
235 |
+
"science_fiction",
|
236 |
+
],
|
237 |
+
)
|
238 |
+
|
239 |
+
def test_news_fileids(self):
|
240 |
+
self.assertEqual(
|
241 |
+
ptb.fileids("news")[:3],
|
242 |
+
["WSJ/00/WSJ_0001.MRG", "WSJ/00/WSJ_0002.MRG", "WSJ/00/WSJ_0003.MRG"],
|
243 |
+
)
|
244 |
+
|
245 |
+
def test_category_words(self):
|
246 |
+
self.assertEqual(
|
247 |
+
ptb.words(categories=["humor", "fiction"])[:6],
|
248 |
+
["Thirty-three", "Scotty", "did", "not", "go", "back"],
|
249 |
+
)
|
250 |
+
|
251 |
+
|
252 |
+
@pytest.mark.skip("Skipping test for mwa_ppdb.")
|
253 |
+
class TestMWAPPDB(unittest.TestCase):
|
254 |
+
def test_fileids(self):
|
255 |
+
self.assertEqual(
|
256 |
+
mwa_ppdb.fileids(), ["ppdb-1.0-xxxl-lexical.extended.synonyms.uniquepairs"]
|
257 |
+
)
|
258 |
+
|
259 |
+
def test_entries(self):
|
260 |
+
self.assertEqual(
|
261 |
+
mwa_ppdb.entries()[:10],
|
262 |
+
[
|
263 |
+
("10/17/01", "17/10/2001"),
|
264 |
+
("102,70", "102.70"),
|
265 |
+
("13,53", "13.53"),
|
266 |
+
("3.2.5.3.2.1", "3.2.5.3.2.1."),
|
267 |
+
("53,76", "53.76"),
|
268 |
+
("6.9.5", "6.9.5."),
|
269 |
+
("7.7.6.3", "7.7.6.3."),
|
270 |
+
("76,20", "76.20"),
|
271 |
+
("79,85", "79.85"),
|
272 |
+
("93,65", "93.65"),
|
273 |
+
],
|
274 |
+
)
|
venv/lib/python3.10/site-packages/nltk/test/unit/test_corpus_views.py
ADDED
@@ -0,0 +1,48 @@
"""
Corpus View Regression Tests
"""
import unittest

import nltk.data
from nltk.corpus.reader.util import (
    StreamBackedCorpusView,
    read_line_block,
    read_whitespace_block,
)


class TestCorpusViews(unittest.TestCase):

    linetok = nltk.LineTokenizer(blanklines="keep")
    names = [
        "corpora/inaugural/README",  # A very short file (160 chars)
        "corpora/inaugural/1793-Washington.txt",  # A relatively short file (791 chars)
        "corpora/inaugural/1909-Taft.txt",  # A longer file (32k chars)
    ]

    def data(self):
        for name in self.names:
            f = nltk.data.find(name)
            with f.open() as fp:
                file_data = fp.read().decode("utf8")
            yield f, file_data

    def test_correct_values(self):
        # Check that corpus views produce the correct sequence of values.

        for f, file_data in self.data():
            v = StreamBackedCorpusView(f, read_whitespace_block)
            self.assertEqual(list(v), file_data.split())

            v = StreamBackedCorpusView(f, read_line_block)
            self.assertEqual(list(v), self.linetok.tokenize(file_data))

    def test_correct_length(self):
        # Check that the corpus views report the correct lengths:

        for f, file_data in self.data():
            v = StreamBackedCorpusView(f, read_whitespace_block)
            self.assertEqual(len(v), len(file_data.split()))

            v = StreamBackedCorpusView(f, read_line_block)
            self.assertEqual(len(v), len(self.linetok.tokenize(file_data)))
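Illustrative sketch (not part of the diffed file above): the two regression tests drive StreamBackedCorpusView only through list() and len(). A minimal interactive use of the same API, assuming the standard "inaugural" corpus has been downloaded (e.g. via nltk.download("inaugural")):

import nltk.data
from nltk.corpus.reader.util import StreamBackedCorpusView, read_whitespace_block

path = nltk.data.find("corpora/inaugural/README")
view = StreamBackedCorpusView(path, read_whitespace_block)

# The view is a lazy sequence: blocks are read from the stream on demand,
# so slicing and len() do not load the whole file into memory at once.
print(view[:5])
print(len(view))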
venv/lib/python3.10/site-packages/nltk/test/unit/test_data.py
ADDED
@@ -0,0 +1,15 @@
import pytest

import nltk.data


def test_find_raises_exception():
    with pytest.raises(LookupError):
        nltk.data.find("no_such_resource/foo")


def test_find_raises_exception_with_full_resource_name():
    no_such_thing = "no_such_thing/bar"
    with pytest.raises(LookupError) as exc:
        nltk.data.find(no_such_thing)
    assert no_such_thing in str(exc)
venv/lib/python3.10/site-packages/nltk/test/unit/test_disagreement.py
ADDED
@@ -0,0 +1,144 @@
import unittest

from nltk.metrics.agreement import AnnotationTask


class TestDisagreement(unittest.TestCase):

    """
    Class containing unit tests for nltk.metrics.agreement.Disagreement.
    """

    def test_easy(self):
        """
        Simple test, based on
        https://github.com/foolswood/krippendorffs_alpha/raw/master/krippendorff.pdf.
        """
        data = [
            ("coder1", "dress1", "YES"),
            ("coder2", "dress1", "NO"),
            ("coder3", "dress1", "NO"),
            ("coder1", "dress2", "YES"),
            ("coder2", "dress2", "NO"),
            ("coder3", "dress3", "NO"),
        ]
        annotation_task = AnnotationTask(data)
        self.assertAlmostEqual(annotation_task.alpha(), -0.3333333)

    def test_easy2(self):
        """
        Same simple test with 1 rating removed.
        Removal of that rating should not matter: K-Apha ignores items with
        only 1 rating.
        """
        data = [
            ("coder1", "dress1", "YES"),
            ("coder2", "dress1", "NO"),
            ("coder3", "dress1", "NO"),
            ("coder1", "dress2", "YES"),
            ("coder2", "dress2", "NO"),
        ]
        annotation_task = AnnotationTask(data)
        self.assertAlmostEqual(annotation_task.alpha(), -0.3333333)

    def test_advanced(self):
        """
        More advanced test, based on
        http://www.agreestat.com/research_papers/onkrippendorffalpha.pdf
        """
        data = [
            ("A", "1", "1"),
            ("B", "1", "1"),
            ("D", "1", "1"),
            ("A", "2", "2"),
            ("B", "2", "2"),
            ("C", "2", "3"),
            ("D", "2", "2"),
            ("A", "3", "3"),
            ("B", "3", "3"),
            ("C", "3", "3"),
            ("D", "3", "3"),
            ("A", "4", "3"),
            ("B", "4", "3"),
            ("C", "4", "3"),
            ("D", "4", "3"),
            ("A", "5", "2"),
            ("B", "5", "2"),
            ("C", "5", "2"),
            ("D", "5", "2"),
            ("A", "6", "1"),
            ("B", "6", "2"),
            ("C", "6", "3"),
            ("D", "6", "4"),
            ("A", "7", "4"),
            ("B", "7", "4"),
            ("C", "7", "4"),
            ("D", "7", "4"),
            ("A", "8", "1"),
            ("B", "8", "1"),
            ("C", "8", "2"),
            ("D", "8", "1"),
            ("A", "9", "2"),
            ("B", "9", "2"),
            ("C", "9", "2"),
            ("D", "9", "2"),
            ("B", "10", "5"),
            ("C", "10", "5"),
            ("D", "10", "5"),
            ("C", "11", "1"),
            ("D", "11", "1"),
            ("C", "12", "3"),
        ]
        annotation_task = AnnotationTask(data)
        self.assertAlmostEqual(annotation_task.alpha(), 0.743421052632)

    def test_advanced2(self):
        """
        Same more advanced example, but with 1 rating removed.
        Again, removal of that 1 rating should not matter.
        """
        data = [
            ("A", "1", "1"),
            ("B", "1", "1"),
            ("D", "1", "1"),
            ("A", "2", "2"),
            ("B", "2", "2"),
            ("C", "2", "3"),
            ("D", "2", "2"),
            ("A", "3", "3"),
            ("B", "3", "3"),
            ("C", "3", "3"),
            ("D", "3", "3"),
            ("A", "4", "3"),
            ("B", "4", "3"),
            ("C", "4", "3"),
            ("D", "4", "3"),
            ("A", "5", "2"),
            ("B", "5", "2"),
            ("C", "5", "2"),
            ("D", "5", "2"),
            ("A", "6", "1"),
            ("B", "6", "2"),
            ("C", "6", "3"),
            ("D", "6", "4"),
            ("A", "7", "4"),
            ("B", "7", "4"),
            ("C", "7", "4"),
            ("D", "7", "4"),
            ("A", "8", "1"),
            ("B", "8", "1"),
            ("C", "8", "2"),
            ("D", "8", "1"),
            ("A", "9", "2"),
            ("B", "9", "2"),
            ("C", "9", "2"),
            ("D", "9", "2"),
            ("B", "10", "5"),
            ("C", "10", "5"),
            ("D", "10", "5"),
            ("C", "11", "1"),
            ("D", "11", "1"),
            ("C", "12", "3"),
        ]
        annotation_task = AnnotationTask(data)
        self.assertAlmostEqual(annotation_task.alpha(), 0.743421052632)
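Illustrative sketch (not part of the diffed file above): every assertion in these tests reduces to the call chain AnnotationTask(data).alpha(). A minimal standalone use with the same (coder, item, label) triples as test_easy2:

from nltk.metrics.agreement import AnnotationTask

data = [
    ("coder1", "dress1", "YES"),
    ("coder2", "dress1", "NO"),
    ("coder3", "dress1", "NO"),
    ("coder1", "dress2", "YES"),
    ("coder2", "dress2", "NO"),
]
# Krippendorff's alpha for this tiny task; approx. -0.333, as asserted above.
print(AnnotationTask(data).alpha())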
venv/lib/python3.10/site-packages/nltk/test/unit/test_distance.py
ADDED
@@ -0,0 +1,129 @@
from typing import Tuple

import pytest

from nltk.metrics.distance import edit_distance


class TestEditDistance:
    @pytest.mark.parametrize(
        "left,right,substitution_cost,expecteds",
        [
            # Allowing transpositions reduces the number of edits required.
            # with transpositions:
            # e.g. "abc" -T-> "cba" -D-> "ca": 2 steps
            #
            # without transpositions:
            # e.g. "abc" -D-> "ab" -D-> "a" -I-> "ca": 3 steps
            ("abc", "ca", 1, (2, 3)),
            ("abc", "ca", 5, (2, 3)),  # Doesn't *require* substitutions
            # Note, a substition_cost of higher than 2 doesn't make much
            # sense, as a deletion + insertion is identical, and always
            # costs 2.
            #
            #
            # Transpositions don't always reduce the number of edits required:
            # with or without transpositions:
            # e.g. "wants" -D-> "wats" -D-> "was" -I-> "wasp": 3 steps
            ("wants", "wasp", 1, (3, 3)),
            ("wants", "wasp", 5, (3, 3)),  # Doesn't *require* substitutions
            #
            #
            # Ought to have the same results with and without transpositions
            # with or without transpositions:
            # e.g. "rain" -S-> "sain" -S-> "shin" -I-> "shine": 3 steps
            # (but cost 5 if substitution_cost=2)
            ("rain", "shine", 1, (3, 3)),
            ("rain", "shine", 2, (5, 5)),  # Does *require* substitutions
            #
            #
            # Several potentially interesting typos
            # with transpositions:
            # e.g. "acbdef" -T-> "abcdef": 1 step
            #
            # without transpositions:
            # e.g. "acbdef" -D-> "abdef" -I-> "abcdef": 2 steps
            ("acbdef", "abcdef", 1, (1, 2)),
            ("acbdef", "abcdef", 2, (1, 2)),  # Doesn't *require* substitutions
            #
            #
            # with transpositions:
            # e.g. "lnaguaeg" -T-> "languaeg" -T-> "language": 2 steps
            #
            # without transpositions:
            # e.g. "lnaguaeg" -D-> "laguaeg" -I-> "languaeg" -D-> "languag" -I-> "language": 4 steps
            ("lnaguaeg", "language", 1, (2, 4)),
            ("lnaguaeg", "language", 2, (2, 4)),  # Doesn't *require* substitutions
            #
            #
            # with transpositions:
            # e.g. "lnaugage" -T-> "lanugage" -T-> "language": 2 steps
            #
            # without transpositions:
            # e.g. "lnaugage" -S-> "lnangage" -D-> "langage" -I-> "language": 3 steps
            # (but one substitution, so a cost of 4 if substition_cost = 2)
            ("lnaugage", "language", 1, (2, 3)),
            ("lnaugage", "language", 2, (2, 4)),
            # Does *require* substitutions if no transpositions
            #
            #
            # with transpositions:
            # e.g. "lngauage" -T-> "lnaguage" -T-> "language": 2 steps
            # without transpositions:
            # e.g. "lngauage" -I-> "lanaguage" -D-> "language": 2 steps
            ("lngauage", "language", 1, (2, 2)),
            ("lngauage", "language", 2, (2, 2)),  # Doesn't *require* substitutions
            #
            #
            # with or without transpositions:
            # e.g. "wants" -S-> "sants" -S-> "swnts" -S-> "swits" -S-> "swims" -D-> "swim": 5 steps
            #
            # with substitution_cost=2 and transpositions:
            # e.g. "wants" -T-> "santw" -D-> "sntw" -D-> "stw" -D-> "sw"
            # -I-> "swi" -I-> "swim": 6 steps
            #
            # with substitution_cost=2 and no transpositions:
            # e.g. "wants" -I-> "swants" -D-> "swant" -D-> "swan" -D-> "swa" -D-> "sw"
            # -I-> "swi" -I-> "swim": 7 steps
            ("wants", "swim", 1, (5, 5)),
            ("wants", "swim", 2, (6, 7)),
            #
            #
            # with or without transpositions:
            # e.g. "kitten" -S-> "sitten" -s-> "sittin" -I-> "sitting": 3 steps
            # (but cost 5 if substitution_cost=2)
            ("kitten", "sitting", 1, (3, 3)),
            ("kitten", "sitting", 2, (5, 5)),
            #
            # duplicated letter
            # e.g. "duplicated" -D-> "duplicated"
            ("duplicated", "duuplicated", 1, (1, 1)),
            ("duplicated", "duuplicated", 2, (1, 1)),
            ("very duplicated", "very duuplicateed", 2, (2, 2)),
        ],
    )
    def test_with_transpositions(
        self, left: str, right: str, substitution_cost: int, expecteds: Tuple[int, int]
    ):
        """
        Test `edit_distance` between two strings, given some `substitution_cost`,
        and whether transpositions are allowed.

        :param str left: First input string to `edit_distance`.
        :param str right: Second input string to `edit_distance`.
        :param int substitution_cost: The cost of a substitution action in `edit_distance`.
        :param Tuple[int, int] expecteds: A tuple of expected outputs, such that `expecteds[0]` is
            the expected output with `transpositions=True`, and `expecteds[1]` is
            the expected output with `transpositions=False`.
        """
        # Test the input strings in both orderings
        for s1, s2 in ((left, right), (right, left)):
            # zip with [True, False] to get the transpositions value
            for expected, transpositions in zip(expecteds, [True, False]):
                predicted = edit_distance(
                    s1,
                    s2,
                    substitution_cost=substitution_cost,
                    transpositions=transpositions,
                )
                assert predicted == expected
venv/lib/python3.10/site-packages/nltk/test/unit/test_downloader.py
ADDED
@@ -0,0 +1,19 @@
+from nltk import download
+
+
+def test_downloader_using_existing_parent_download_dir(tmp_path):
+    """Test that download works properly when the parent folder of the download_dir exists"""
+
+    download_dir = str(tmp_path.joinpath("another_dir"))
+    download_status = download("mwa_ppdb", download_dir)
+    assert download_status is True
+
+
+def test_downloader_using_non_existing_parent_download_dir(tmp_path):
+    """Test that download works properly when the parent folder of the download_dir does not exist"""
+
+    download_dir = str(
+        tmp_path.joinpath("non-existing-parent-folder", "another-non-existing-folder")
+    )
+    download_status = download("mwa_ppdb", download_dir)
+    assert download_status is True
venv/lib/python3.10/site-packages/nltk/test/unit/test_freqdist.py
ADDED
@@ -0,0 +1,7 @@
+import nltk
+
+
+def test_iterating_returns_an_iterator_ordered_by_frequency():
+    samples = ["one", "two", "two"]
+    distribution = nltk.FreqDist(samples)
+    assert list(distribution) == ["two", "one"]
venv/lib/python3.10/site-packages/nltk/test/unit/test_hmm.py
ADDED
@@ -0,0 +1,82 @@
+import pytest
+
+from nltk.tag import hmm
+
+
+def _wikipedia_example_hmm():
+    # Example from wikipedia
+    # (https://en.wikipedia.org/wiki/Forward%E2%80%93backward_algorithm)
+
+    states = ["rain", "no rain"]
+    symbols = ["umbrella", "no umbrella"]
+
+    A = [[0.7, 0.3], [0.3, 0.7]]  # transition probabilities
+    B = [[0.9, 0.1], [0.2, 0.8]]  # emission probabilities
+    pi = [0.5, 0.5]  # initial probabilities
+
+    seq = ["umbrella", "umbrella", "no umbrella", "umbrella", "umbrella"]
+    seq = list(zip(seq, [None] * len(seq)))
+
+    model = hmm._create_hmm_tagger(states, symbols, A, B, pi)
+    return model, states, symbols, seq
+
+
+def test_forward_probability():
+    from numpy.testing import assert_array_almost_equal
+
+    # example from p. 385, Huang et al
+    model, states, symbols = hmm._market_hmm_example()
+    seq = [("up", None), ("up", None)]
+    expected = [[0.35, 0.02, 0.09], [0.1792, 0.0085, 0.0357]]
+
+    fp = 2 ** model._forward_probability(seq)
+
+    assert_array_almost_equal(fp, expected)
+
+
+def test_forward_probability2():
+    from numpy.testing import assert_array_almost_equal
+
+    model, states, symbols, seq = _wikipedia_example_hmm()
+    fp = 2 ** model._forward_probability(seq)
+
+    # examples in wikipedia are normalized
+    fp = (fp.T / fp.sum(axis=1)).T
+
+    wikipedia_results = [
+        [0.8182, 0.1818],
+        [0.8834, 0.1166],
+        [0.1907, 0.8093],
+        [0.7308, 0.2692],
+        [0.8673, 0.1327],
+    ]
+
+    assert_array_almost_equal(wikipedia_results, fp, 4)
+
+
+def test_backward_probability():
+    from numpy.testing import assert_array_almost_equal
+
+    model, states, symbols, seq = _wikipedia_example_hmm()
+
+    bp = 2 ** model._backward_probability(seq)
+    # examples in wikipedia are normalized
+
+    bp = (bp.T / bp.sum(axis=1)).T
+
+    wikipedia_results = [
+        # Forward-backward algorithm doesn't need b0_5,
+        # so .backward_probability doesn't compute it.
+        # [0.6469, 0.3531],
+        [0.5923, 0.4077],
+        [0.3763, 0.6237],
+        [0.6533, 0.3467],
+        [0.6273, 0.3727],
+        [0.5, 0.5],
+    ]
+
+    assert_array_almost_equal(wikipedia_results, bp, 4)
+
+
+def setup_module(module):
+    pytest.importorskip("numpy")
venv/lib/python3.10/site-packages/nltk/test/unit/test_json2csv_corpus.py
ADDED
@@ -0,0 +1,210 @@
+# Natural Language Toolkit: Twitter client
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Lorenzo Rubio <[email protected]>
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+"""
+Regression tests for `json2csv()` and `json2csv_entities()` in Twitter
+package.
+"""
+from pathlib import Path
+
+import pytest
+
+from nltk.corpus import twitter_samples
+from nltk.twitter.common import json2csv, json2csv_entities
+
+
+def files_are_identical(pathA, pathB):
+    """
+    Compare two files, ignoring carriage returns,
+    leading whitespace, and trailing whitespace
+    """
+    f1 = [l.strip() for l in pathA.read_bytes().splitlines()]
+    f2 = [l.strip() for l in pathB.read_bytes().splitlines()]
+    return f1 == f2
+
+
+subdir = Path(__file__).parent / "files"
+
+
+@pytest.fixture
+def infile():
+    with open(twitter_samples.abspath("tweets.20150430-223406.json")) as infile:
+        return [next(infile) for x in range(100)]
+
+
+def test_textoutput(tmp_path, infile):
+    ref_fn = subdir / "tweets.20150430-223406.text.csv.ref"
+    outfn = tmp_path / "tweets.20150430-223406.text.csv"
+    json2csv(infile, outfn, ["text"], gzip_compress=False)
+    assert files_are_identical(outfn, ref_fn)
+
+
+def test_tweet_metadata(tmp_path, infile):
+    ref_fn = subdir / "tweets.20150430-223406.tweet.csv.ref"
+    fields = [
+        "created_at",
+        "favorite_count",
+        "id",
+        "in_reply_to_status_id",
+        "in_reply_to_user_id",
+        "retweet_count",
+        "retweeted",
+        "text",
+        "truncated",
+        "user.id",
+    ]
+
+    outfn = tmp_path / "tweets.20150430-223406.tweet.csv"
+    json2csv(infile, outfn, fields, gzip_compress=False)
+    assert files_are_identical(outfn, ref_fn)
+
+
+def test_user_metadata(tmp_path, infile):
+    ref_fn = subdir / "tweets.20150430-223406.user.csv.ref"
+    fields = ["id", "text", "user.id", "user.followers_count", "user.friends_count"]
+
+    outfn = tmp_path / "tweets.20150430-223406.user.csv"
+    json2csv(infile, outfn, fields, gzip_compress=False)
+    assert files_are_identical(outfn, ref_fn)
+
+
+def test_tweet_hashtag(tmp_path, infile):
+    ref_fn = subdir / "tweets.20150430-223406.hashtag.csv.ref"
+    outfn = tmp_path / "tweets.20150430-223406.hashtag.csv"
+    json2csv_entities(
+        infile,
+        outfn,
+        ["id", "text"],
+        "hashtags",
+        ["text"],
+        gzip_compress=False,
+    )
+    assert files_are_identical(outfn, ref_fn)
+
+
+def test_tweet_usermention(tmp_path, infile):
+    ref_fn = subdir / "tweets.20150430-223406.usermention.csv.ref"
+    outfn = tmp_path / "tweets.20150430-223406.usermention.csv"
+    json2csv_entities(
+        infile,
+        outfn,
+        ["id", "text"],
+        "user_mentions",
+        ["id", "screen_name"],
+        gzip_compress=False,
+    )
+    assert files_are_identical(outfn, ref_fn)
+
+
+def test_tweet_media(tmp_path, infile):
+    ref_fn = subdir / "tweets.20150430-223406.media.csv.ref"
+    outfn = tmp_path / "tweets.20150430-223406.media.csv"
+    json2csv_entities(
+        infile,
+        outfn,
+        ["id"],
+        "media",
+        ["media_url", "url"],
+        gzip_compress=False,
+    )
+
+    assert files_are_identical(outfn, ref_fn)
+
+
+def test_tweet_url(tmp_path, infile):
+    ref_fn = subdir / "tweets.20150430-223406.url.csv.ref"
+    outfn = tmp_path / "tweets.20150430-223406.url.csv"
+    json2csv_entities(
+        infile,
+        outfn,
+        ["id"],
+        "urls",
+        ["url", "expanded_url"],
+        gzip_compress=False,
+    )
+
+    assert files_are_identical(outfn, ref_fn)
+
+
+def test_userurl(tmp_path, infile):
+    ref_fn = subdir / "tweets.20150430-223406.userurl.csv.ref"
+    outfn = tmp_path / "tweets.20150430-223406.userurl.csv"
+    json2csv_entities(
+        infile,
+        outfn,
+        ["id", "screen_name"],
+        "user.urls",
+        ["url", "expanded_url"],
+        gzip_compress=False,
+    )
+
+    assert files_are_identical(outfn, ref_fn)
+
+
+def test_tweet_place(tmp_path, infile):
+    ref_fn = subdir / "tweets.20150430-223406.place.csv.ref"
+    outfn = tmp_path / "tweets.20150430-223406.place.csv"
+    json2csv_entities(
+        infile,
+        outfn,
+        ["id", "text"],
+        "place",
+        ["name", "country"],
+        gzip_compress=False,
+    )
+
+    assert files_are_identical(outfn, ref_fn)
+
+
+def test_tweet_place_boundingbox(tmp_path, infile):
+    ref_fn = subdir / "tweets.20150430-223406.placeboundingbox.csv.ref"
+    outfn = tmp_path / "tweets.20150430-223406.placeboundingbox.csv"
+    json2csv_entities(
+        infile,
+        outfn,
+        ["id", "name"],
+        "place.bounding_box",
+        ["coordinates"],
+        gzip_compress=False,
+    )
+
+    assert files_are_identical(outfn, ref_fn)
+
+
+def test_retweet_original_tweet(tmp_path, infile):
+    ref_fn = subdir / "tweets.20150430-223406.retweet.csv.ref"
+    outfn = tmp_path / "tweets.20150430-223406.retweet.csv"
+    json2csv_entities(
+        infile,
+        outfn,
+        ["id"],
+        "retweeted_status",
+        [
+            "created_at",
+            "favorite_count",
+            "id",
+            "in_reply_to_status_id",
+            "in_reply_to_user_id",
+            "retweet_count",
+            "text",
+            "truncated",
+            "user.id",
+        ],
+        gzip_compress=False,
+    )
+
+    assert files_are_identical(outfn, ref_fn)
+
+
+def test_file_is_wrong(tmp_path, infile):
+    """
+    Sanity check that file comparison is not giving false positives.
+    """
+    ref_fn = subdir / "tweets.20150430-223406.retweet.csv.ref"
+    outfn = tmp_path / "tweets.20150430-223406.text.csv"
+    json2csv(infile, outfn, ["text"], gzip_compress=False)
+    assert not files_are_identical(outfn, ref_fn)
venv/lib/python3.10/site-packages/nltk/test/unit/test_json_serialization.py
ADDED
@@ -0,0 +1,95 @@
+import unittest
+
+from nltk.corpus import brown
+from nltk.jsontags import JSONTaggedDecoder, JSONTaggedEncoder
+from nltk.tag import (
+    AffixTagger,
+    BigramTagger,
+    BrillTagger,
+    BrillTaggerTrainer,
+    DefaultTagger,
+    NgramTagger,
+    PerceptronTagger,
+    RegexpTagger,
+    TrigramTagger,
+    UnigramTagger,
+)
+from nltk.tag.brill import nltkdemo18
+
+
+class TestJSONSerialization(unittest.TestCase):
+    def setUp(self):
+        self.corpus = brown.tagged_sents()[:35]
+        self.decoder = JSONTaggedDecoder()
+        self.encoder = JSONTaggedEncoder()
+        self.default_tagger = DefaultTagger("NN")
+
+    def test_default_tagger(self):
+        encoded = self.encoder.encode(self.default_tagger)
+        decoded = self.decoder.decode(encoded)
+
+        self.assertEqual(repr(self.default_tagger), repr(decoded))
+        self.assertEqual(self.default_tagger._tag, decoded._tag)
+
+    def test_regexp_tagger(self):
+        tagger = RegexpTagger([(r".*", "NN")], backoff=self.default_tagger)
+
+        encoded = self.encoder.encode(tagger)
+        decoded = self.decoder.decode(encoded)
+
+        self.assertEqual(repr(tagger), repr(decoded))
+        self.assertEqual(repr(tagger.backoff), repr(decoded.backoff))
+        self.assertEqual(tagger._regexps, decoded._regexps)
+
+    def test_affix_tagger(self):
+        tagger = AffixTagger(self.corpus, backoff=self.default_tagger)
+
+        encoded = self.encoder.encode(tagger)
+        decoded = self.decoder.decode(encoded)
+
+        self.assertEqual(repr(tagger), repr(decoded))
+        self.assertEqual(repr(tagger.backoff), repr(decoded.backoff))
+        self.assertEqual(tagger._affix_length, decoded._affix_length)
+        self.assertEqual(tagger._min_word_length, decoded._min_word_length)
+        self.assertEqual(tagger._context_to_tag, decoded._context_to_tag)
+
+    def test_ngram_taggers(self):
+        unitagger = UnigramTagger(self.corpus, backoff=self.default_tagger)
+        bitagger = BigramTagger(self.corpus, backoff=unitagger)
+        tritagger = TrigramTagger(self.corpus, backoff=bitagger)
+        ntagger = NgramTagger(4, self.corpus, backoff=tritagger)
+
+        encoded = self.encoder.encode(ntagger)
+        decoded = self.decoder.decode(encoded)
+
+        self.assertEqual(repr(ntagger), repr(decoded))
+        self.assertEqual(repr(tritagger), repr(decoded.backoff))
+        self.assertEqual(repr(bitagger), repr(decoded.backoff.backoff))
+        self.assertEqual(repr(unitagger), repr(decoded.backoff.backoff.backoff))
+        self.assertEqual(
+            repr(self.default_tagger), repr(decoded.backoff.backoff.backoff.backoff)
+        )
+
+    def test_perceptron_tagger(self):
+        tagger = PerceptronTagger(load=False)
+        tagger.train(self.corpus)
+
+        encoded = self.encoder.encode(tagger)
+        decoded = self.decoder.decode(encoded)
+
+        self.assertEqual(tagger.model.weights, decoded.model.weights)
+        self.assertEqual(tagger.tagdict, decoded.tagdict)
+        self.assertEqual(tagger.classes, decoded.classes)
+
+    def test_brill_tagger(self):
+        trainer = BrillTaggerTrainer(
+            self.default_tagger, nltkdemo18(), deterministic=True
+        )
+        tagger = trainer.train(self.corpus, max_rules=30)
+
+        encoded = self.encoder.encode(tagger)
+        decoded = self.decoder.decode(encoded)
+
+        self.assertEqual(repr(tagger._initial_tagger), repr(decoded._initial_tagger))
+        self.assertEqual(tagger._rules, decoded._rules)
+        self.assertEqual(tagger._training_stats, decoded._training_stats)
venv/lib/python3.10/site-packages/nltk/test/unit/test_metrics.py
ADDED
@@ -0,0 +1,66 @@
+import unittest
+
+from nltk.metrics import (
+    BigramAssocMeasures,
+    QuadgramAssocMeasures,
+    TrigramAssocMeasures,
+)
+
+## Test the likelihood ratio metric
+
+_DELTA = 1e-8
+
+
+class TestLikelihoodRatio(unittest.TestCase):
+    def test_lr_bigram(self):
+        self.assertAlmostEqual(
+            BigramAssocMeasures.likelihood_ratio(2, (4, 4), 20),
+            2.4142743368419755,
+            delta=_DELTA,
+        )
+        self.assertAlmostEqual(
+            BigramAssocMeasures.likelihood_ratio(1, (1, 1), 1), 0.0, delta=_DELTA
+        )
+        self.assertRaises(
+            ValueError,
+            BigramAssocMeasures.likelihood_ratio,
+            *(0, (2, 2), 2),
+        )
+
+    def test_lr_trigram(self):
+        self.assertAlmostEqual(
+            TrigramAssocMeasures.likelihood_ratio(1, (1, 1, 1), (1, 1, 1), 2),
+            5.545177444479562,
+            delta=_DELTA,
+        )
+        self.assertAlmostEqual(
+            TrigramAssocMeasures.likelihood_ratio(1, (1, 1, 1), (1, 1, 1), 1),
+            0.0,
+            delta=_DELTA,
+        )
+        self.assertRaises(
+            ValueError,
+            TrigramAssocMeasures.likelihood_ratio,
+            *(1, (1, 1, 2), (1, 1, 2), 2),
+        )
+
+    def test_lr_quadgram(self):
+        self.assertAlmostEqual(
+            QuadgramAssocMeasures.likelihood_ratio(
+                1, (1, 1, 1, 1), (1, 1, 1, 1, 1, 1), (1, 1, 1, 1), 2
+            ),
+            8.317766166719343,
+            delta=_DELTA,
+        )
+        self.assertAlmostEqual(
+            QuadgramAssocMeasures.likelihood_ratio(
+                1, (1, 1, 1, 1), (1, 1, 1, 1, 1, 1), (1, 1, 1, 1), 1
+            ),
+            0.0,
+            delta=_DELTA,
+        )
+        self.assertRaises(
+            ValueError,
+            QuadgramAssocMeasures.likelihood_ratio,
+            *(1, (1, 1, 1, 1), (1, 1, 1, 1, 1, 2), (1, 1, 1, 1), 1),
+        )
venv/lib/python3.10/site-packages/nltk/test/unit/test_naivebayes.py
ADDED
@@ -0,0 +1,21 @@
+import unittest
+
+from nltk.classify.naivebayes import NaiveBayesClassifier
+
+
+class NaiveBayesClassifierTest(unittest.TestCase):
+    def test_simple(self):
+        training_features = [
+            ({"nice": True, "good": True}, "positive"),
+            ({"bad": True, "mean": True}, "negative"),
+        ]
+
+        classifier = NaiveBayesClassifier.train(training_features)
+
+        result = classifier.prob_classify({"nice": True})
+        self.assertTrue(result.prob("positive") > result.prob("negative"))
+        self.assertEqual(result.max(), "positive")
+
+        result = classifier.prob_classify({"bad": True})
+        self.assertTrue(result.prob("positive") < result.prob("negative"))
+        self.assertEqual(result.max(), "negative")
venv/lib/python3.10/site-packages/nltk/test/unit/test_nombank.py
ADDED
@@ -0,0 +1,27 @@
+"""
+Unit tests for nltk.corpus.nombank
+"""
+
+import unittest
+
+from nltk.corpus import nombank
+
+# Load the nombank once.
+nombank.nouns()
+
+
+class NombankDemo(unittest.TestCase):
+    def test_numbers(self):
+        # No. of instances.
+        self.assertEqual(len(nombank.instances()), 114574)
+        # No. of rolesets
+        self.assertEqual(len(nombank.rolesets()), 5577)
+        # No. of nouns.
+        self.assertEqual(len(nombank.nouns()), 4704)
+
+    def test_instance(self):
+        self.assertEqual(nombank.instances()[0].roleset, "perc-sign.01")
+
+    def test_framefiles_fileids(self):
+        self.assertEqual(len(nombank.fileids()), 4705)
+        self.assertTrue(all(fileid.endswith(".xml") for fileid in nombank.fileids()))
venv/lib/python3.10/site-packages/nltk/test/unit/test_pl196x.py
ADDED
@@ -0,0 +1,13 @@
+import unittest
+
+import nltk
+from nltk.corpus.reader import pl196x
+
+
+class TestCorpusViews(unittest.TestCase):
+    def test_corpus_reader(self):
+        pl196x_dir = nltk.data.find("corpora/pl196x")
+        pl = pl196x.Pl196xCorpusReader(
+            pl196x_dir, r".*\.xml", textids="textids.txt", cat_file="cats.txt"
+        )
+        pl.tagged_words(fileids=pl.fileids(), categories="cats.txt")
venv/lib/python3.10/site-packages/nltk/test/unit/test_pos_tag.py
ADDED
@@ -0,0 +1,83 @@
+"""
+Tests for nltk.pos_tag
+"""
+
+
+import unittest
+
+from nltk import pos_tag, word_tokenize
+
+
+class TestPosTag(unittest.TestCase):
+    def test_pos_tag_eng(self):
+        text = "John's big idea isn't all that bad."
+        expected_tagged = [
+            ("John", "NNP"),
+            ("'s", "POS"),
+            ("big", "JJ"),
+            ("idea", "NN"),
+            ("is", "VBZ"),
+            ("n't", "RB"),
+            ("all", "PDT"),
+            ("that", "DT"),
+            ("bad", "JJ"),
+            (".", "."),
+        ]
+        assert pos_tag(word_tokenize(text)) == expected_tagged
+
+    def test_pos_tag_eng_universal(self):
+        text = "John's big idea isn't all that bad."
+        expected_tagged = [
+            ("John", "NOUN"),
+            ("'s", "PRT"),
+            ("big", "ADJ"),
+            ("idea", "NOUN"),
+            ("is", "VERB"),
+            ("n't", "ADV"),
+            ("all", "DET"),
+            ("that", "DET"),
+            ("bad", "ADJ"),
+            (".", "."),
+        ]
+        assert pos_tag(word_tokenize(text), tagset="universal") == expected_tagged
+
+    def test_pos_tag_rus(self):
+        text = "Илья оторопел и дважды перечитал бумажку."
+        expected_tagged = [
+            ("Илья", "S"),
+            ("оторопел", "V"),
+            ("и", "CONJ"),
+            ("дважды", "ADV"),
+            ("перечитал", "V"),
+            ("бумажку", "S"),
+            (".", "NONLEX"),
+        ]
+        assert pos_tag(word_tokenize(text), lang="rus") == expected_tagged
+
+    def test_pos_tag_rus_universal(self):
+        text = "Илья оторопел и дважды перечитал бумажку."
+        expected_tagged = [
+            ("Илья", "NOUN"),
+            ("оторопел", "VERB"),
+            ("и", "CONJ"),
+            ("дважды", "ADV"),
+            ("перечитал", "VERB"),
+            ("бумажку", "NOUN"),
+            (".", "."),
+        ]
+        assert (
+            pos_tag(word_tokenize(text), tagset="universal", lang="rus")
+            == expected_tagged
+        )
+
+    def test_pos_tag_unknown_lang(self):
+        text = "모르겠 습니 다"
+        self.assertRaises(NotImplementedError, pos_tag, word_tokenize(text), lang="kor")
+        # Test for default kwarg, `lang=None`
+        self.assertRaises(NotImplementedError, pos_tag, word_tokenize(text), lang=None)
+
+    def test_unspecified_lang(self):
+        # Tries to force the lang='eng' option.
+        text = "모르겠 습니 다"
+        expected_but_wrong = [("모르겠", "JJ"), ("습니", "NNP"), ("다", "NN")]
+        assert pos_tag(word_tokenize(text)) == expected_but_wrong
venv/lib/python3.10/site-packages/nltk/test/unit/test_ribes.py
ADDED
@@ -0,0 +1,246 @@
+from nltk.translate.ribes_score import corpus_ribes, word_rank_alignment
+
+
+def test_ribes_empty_worder():  # worder as in word order
+    # Verifies that these two sentences have no alignment,
+    # and hence have the lowest possible RIBES score.
+    hyp = "This is a nice sentence which I quite like".split()
+    ref = "Okay well that's neat and all but the reference's different".split()
+
+    assert word_rank_alignment(ref, hyp) == []
+
+    list_of_refs = [[ref]]
+    hypotheses = [hyp]
+    assert corpus_ribes(list_of_refs, hypotheses) == 0.0
+
+
+def test_ribes_one_worder():
+    # Verifies that these two sentences have just one match,
+    # and the RIBES score for this sentence with very little
+    # correspondence is 0.
+    hyp = "This is a nice sentence which I quite like".split()
+    ref = "Okay well that's nice and all but the reference's different".split()
+
+    assert word_rank_alignment(ref, hyp) == [3]
+
+    list_of_refs = [[ref]]
+    hypotheses = [hyp]
+    assert corpus_ribes(list_of_refs, hypotheses) == 0.0
+
+
+def test_ribes_two_worder():
+    # Verifies that these two sentences have two matches,
+    # but still get the lowest possible RIBES score due
+    # to the lack of similarity.
+    hyp = "This is a nice sentence which I quite like".split()
+    ref = "Okay well that's nice and all but the reference is different".split()
+
+    assert word_rank_alignment(ref, hyp) == [9, 3]
+
+    list_of_refs = [[ref]]
+    hypotheses = [hyp]
+    assert corpus_ribes(list_of_refs, hypotheses) == 0.0
+
+
+def test_ribes():
+    # Based on the doctest of the corpus_ribes function
+    hyp1 = [
+        "It",
+        "is",
+        "a",
+        "guide",
+        "to",
+        "action",
+        "which",
+        "ensures",
+        "that",
+        "the",
+        "military",
+        "always",
+        "obeys",
+        "the",
+        "commands",
+        "of",
+        "the",
+        "party",
+    ]
+    ref1a = [
+        "It",
+        "is",
+        "a",
+        "guide",
+        "to",
+        "action",
+        "that",
+        "ensures",
+        "that",
+        "the",
+        "military",
+        "will",
+        "forever",
+        "heed",
+        "Party",
+        "commands",
+    ]
+    ref1b = [
+        "It",
+        "is",
+        "the",
+        "guiding",
+        "principle",
+        "which",
+        "guarantees",
+        "the",
+        "military",
+        "forces",
+        "always",
+        "being",
+        "under",
+        "the",
+        "command",
+        "of",
+        "the",
+        "Party",
+    ]
+    ref1c = [
+        "It",
+        "is",
+        "the",
+        "practical",
+        "guide",
+        "for",
+        "the",
+        "army",
+        "always",
+        "to",
+        "heed",
+        "the",
+        "directions",
+        "of",
+        "the",
+        "party",
+    ]
+
+    hyp2 = [
+        "he",
+        "read",
+        "the",
+        "book",
+        "because",
+        "he",
+        "was",
+        "interested",
+        "in",
+        "world",
+        "history",
+    ]
+    ref2a = [
+        "he",
+        "was",
+        "interested",
+        "in",
+        "world",
+        "history",
+        "because",
+        "he",
+        "read",
+        "the",
+        "book",
+    ]
+
+    list_of_refs = [[ref1a, ref1b, ref1c], [ref2a]]
+    hypotheses = [hyp1, hyp2]
+
+    score = corpus_ribes(list_of_refs, hypotheses)
+
+    assert round(score, 4) == 0.3597
+
+
+def test_no_zero_div():
+    # Regression test for Issue 2529, assure that no ZeroDivisionError is thrown.
+    hyp1 = [
+        "It",
+        "is",
+        "a",
+        "guide",
+        "to",
+        "action",
+        "which",
+        "ensures",
+        "that",
+        "the",
+        "military",
+        "always",
+        "obeys",
+        "the",
+        "commands",
+        "of",
+        "the",
+        "party",
+    ]
+    ref1a = [
+        "It",
+        "is",
+        "a",
+        "guide",
+        "to",
+        "action",
+        "that",
+        "ensures",
+        "that",
+        "the",
+        "military",
+        "will",
+        "forever",
+        "heed",
+        "Party",
+        "commands",
+    ]
+    ref1b = [
+        "It",
+        "is",
+        "the",
+        "guiding",
+        "principle",
+        "which",
+        "guarantees",
+        "the",
+        "military",
+        "forces",
+        "always",
+        "being",
+        "under",
+        "the",
+        "command",
+        "of",
+        "the",
+        "Party",
+    ]
+    ref1c = [
+        "It",
+        "is",
+        "the",
+        "practical",
+        "guide",
+        "for",
+        "the",
+        "army",
+        "always",
+        "to",
+        "heed",
+        "the",
+        "directions",
+        "of",
+        "the",
+        "party",
+    ]
+
+    hyp2 = ["he", "read", "the"]
+    ref2a = ["he", "was", "interested", "in", "world", "history", "because", "he"]
+
+    list_of_refs = [[ref1a, ref1b, ref1c], [ref2a]]
+    hypotheses = [hyp1, hyp2]
+
+    score = corpus_ribes(list_of_refs, hypotheses)
+
+    assert round(score, 4) == 0.1688
venv/lib/python3.10/site-packages/nltk/test/unit/test_rte_classify.py
ADDED
@@ -0,0 +1,94 @@
+import pytest
+
+from nltk import config_megam
+from nltk.classify.rte_classify import RTEFeatureExtractor, rte_classifier, rte_features
+from nltk.corpus import rte as rte_corpus
+
+expected_from_rte_feature_extration = """
+alwayson        => True
+ne_hyp_extra    => 0
+ne_overlap      => 1
+neg_hyp         => 0
+neg_txt         => 0
+word_hyp_extra  => 3
+word_overlap    => 3
+
+alwayson        => True
+ne_hyp_extra    => 0
+ne_overlap      => 1
+neg_hyp         => 0
+neg_txt         => 0
+word_hyp_extra  => 2
+word_overlap    => 1
+
+alwayson        => True
+ne_hyp_extra    => 1
+ne_overlap      => 1
+neg_hyp         => 0
+neg_txt         => 0
+word_hyp_extra  => 1
+word_overlap    => 2
+
+alwayson        => True
+ne_hyp_extra    => 1
+ne_overlap      => 0
+neg_hyp         => 0
+neg_txt         => 0
+word_hyp_extra  => 6
+word_overlap    => 2
+
+alwayson        => True
+ne_hyp_extra    => 1
+ne_overlap      => 0
+neg_hyp         => 0
+neg_txt         => 0
+word_hyp_extra  => 4
+word_overlap    => 0
+
+alwayson        => True
+ne_hyp_extra    => 1
+ne_overlap      => 0
+neg_hyp         => 0
+neg_txt         => 0
+word_hyp_extra  => 3
+word_overlap    => 1
+"""
+
+
+class TestRTEClassifier:
+    # Test the feature extraction method.
+    def test_rte_feature_extraction(self):
+        pairs = rte_corpus.pairs(["rte1_dev.xml"])[:6]
+        test_output = [
+            f"{key:<15} => {rte_features(pair)[key]}"
+            for pair in pairs
+            for key in sorted(rte_features(pair))
+        ]
+        expected_output = expected_from_rte_feature_extration.strip().split("\n")
+        # Remove null strings.
+        expected_output = list(filter(None, expected_output))
+        assert test_output == expected_output
+
+    # Test the RTEFeatureExtractor object.
+    def test_feature_extractor_object(self):
+        rtepair = rte_corpus.pairs(["rte3_dev.xml"])[33]
+        extractor = RTEFeatureExtractor(rtepair)
+
+        assert extractor.hyp_words == {"member", "China", "SCO."}
+        assert extractor.overlap("word") == set()
+        assert extractor.overlap("ne") == {"China"}
+        assert extractor.hyp_extra("word") == {"member"}
+
+    # Test the RTE classifier training.
+    def test_rte_classification_without_megam(self):
+        # Use a sample size for unit testing, since we
+        # don't need to fully train these classifiers
+        clf = rte_classifier("IIS", sample_N=100)
+        clf = rte_classifier("GIS", sample_N=100)
+
+    def test_rte_classification_with_megam(self):
+        try:
+            config_megam()
+        except (LookupError, AttributeError) as e:
+            pytest.skip("Skipping tests with dependencies on MEGAM")
+        clf = rte_classifier("megam", sample_N=100)
ADDED
@@ -0,0 +1,86 @@
|
+import os
+from io import BytesIO
+
+import pytest
+
+from nltk.corpus.reader import SeekableUnicodeStreamReader
+
+
+def check_reader(unicode_string, encoding):
+    bytestr = unicode_string.encode(encoding)
+    stream = BytesIO(bytestr)
+    reader = SeekableUnicodeStreamReader(stream, encoding)
+
+    # Should open at the start of the file
+    assert reader.tell() == 0
+
+    # Compare original string to contents from `.readlines()`
+    assert unicode_string == "".join(reader.readlines())
+
+    # Should be at the end of the file now
+    stream.seek(0, os.SEEK_END)
+    assert reader.tell() == stream.tell()
+
+    reader.seek(0)  # go back to start
+
+    # Compare original string to contents from `.read()`
+    contents = ""
+    char = None
+    while char != "":
+        char = reader.read(1)
+        contents += char
+    assert unicode_string == contents
+
+
+# Call `check_reader` with a variety of input strings and encodings.
+ENCODINGS = ["ascii", "latin1", "greek", "hebrew", "utf-16", "utf-8"]
+
+STRINGS = [
+    """
+This is a test file.
+It is fairly short.
+""",
+    "This file can be encoded with latin1. \x83",
+    """\
+This is a test file.
+Here's a blank line:
+
+And here's some unicode: \xee \u0123 \uffe3
+""",
+    """\
+This is a test file.
+Unicode characters: \xf3 \u2222 \u3333\u4444 \u5555
+""",
+    """\
+This is a larger file. It has some lines that are longer \
+than 72 characters. It's got lots of repetition. Here's \
+some unicode chars: \xee \u0123 \uffe3 \ueeee \u2345
+
+How fun! Let's repeat it twenty times.
+"""
+    * 20,
+]
+
+
+@pytest.mark.parametrize("string", STRINGS)
+def test_reader(string):
+    for encoding in ENCODINGS:
+        # skip strings that can't be encoded with the current encoding
+        try:
+            string.encode(encoding)
+        except UnicodeEncodeError:
+            continue
+        check_reader(string, encoding)
+
+
+def test_reader_stream_closes_when_deleted():
+    reader = SeekableUnicodeStreamReader(BytesIO(b""), "ascii")
+    assert not reader.stream.closed
+    reader.__del__()
+    assert reader.stream.closed
+
+
+def teardown_module(module=None):
+    import gc
+
+    gc.collect()
venv/lib/python3.10/site-packages/nltk/test/unit/test_senna.py
ADDED
@@ -0,0 +1,112 @@
+"""
+Unit tests for Senna
+"""
+
+import unittest
+from os import environ, path, sep
+
+from nltk.classify import Senna
+from nltk.tag import SennaChunkTagger, SennaNERTagger, SennaTagger
+
+# Set Senna executable path for tests if it is not specified as an environment variable
+if "SENNA" in environ:
+    SENNA_EXECUTABLE_PATH = path.normpath(environ["SENNA"]) + sep
+else:
+    SENNA_EXECUTABLE_PATH = "/usr/share/senna-v3.0"
+
+senna_is_installed = path.exists(SENNA_EXECUTABLE_PATH)
+
+
+@unittest.skipUnless(senna_is_installed, "Requires Senna executable")
+class TestSennaPipeline(unittest.TestCase):
+    """Unittest for nltk.classify.senna"""
+
+    def test_senna_pipeline(self):
+        """Senna pipeline interface"""
+
+        pipeline = Senna(SENNA_EXECUTABLE_PATH, ["pos", "chk", "ner"])
+        sent = "Dusseldorf is an international business center".split()
+        result = [
+            (token["word"], token["chk"], token["ner"], token["pos"])
+            for token in pipeline.tag(sent)
+        ]
+        expected = [
+            ("Dusseldorf", "B-NP", "B-LOC", "NNP"),
+            ("is", "B-VP", "O", "VBZ"),
+            ("an", "B-NP", "O", "DT"),
+            ("international", "I-NP", "O", "JJ"),
+            ("business", "I-NP", "O", "NN"),
+            ("center", "I-NP", "O", "NN"),
+        ]
+        self.assertEqual(result, expected)
+
+
+@unittest.skipUnless(senna_is_installed, "Requires Senna executable")
+class TestSennaTagger(unittest.TestCase):
+    """Unittest for nltk.tag.senna"""
+
+    def test_senna_tagger(self):
+        tagger = SennaTagger(SENNA_EXECUTABLE_PATH)
+        result = tagger.tag("What is the airspeed of an unladen swallow ?".split())
+        expected = [
+            ("What", "WP"),
+            ("is", "VBZ"),
+            ("the", "DT"),
+            ("airspeed", "NN"),
+            ("of", "IN"),
+            ("an", "DT"),
+            ("unladen", "NN"),
+            ("swallow", "NN"),
+            ("?", "."),
+        ]
+        self.assertEqual(result, expected)
+
+    def test_senna_chunk_tagger(self):
+        chktagger = SennaChunkTagger(SENNA_EXECUTABLE_PATH)
+        result_1 = chktagger.tag("What is the airspeed of an unladen swallow ?".split())
+        expected_1 = [
+            ("What", "B-NP"),
+            ("is", "B-VP"),
+            ("the", "B-NP"),
+            ("airspeed", "I-NP"),
+            ("of", "B-PP"),
+            ("an", "B-NP"),
+            ("unladen", "I-NP"),
+            ("swallow", "I-NP"),
+            ("?", "O"),
+        ]
+
+        result_2 = list(chktagger.bio_to_chunks(result_1, chunk_type="NP"))
+        expected_2 = [
+            ("What", "0"),
+            ("the airspeed", "2-3"),
+            ("an unladen swallow", "5-6-7"),
+        ]
+        self.assertEqual(result_1, expected_1)
+        self.assertEqual(result_2, expected_2)
+
+    def test_senna_ner_tagger(self):
+        nertagger = SennaNERTagger(SENNA_EXECUTABLE_PATH)
+        result_1 = nertagger.tag("Shakespeare theatre was in London .".split())
+        expected_1 = [
+            ("Shakespeare", "B-PER"),
+            ("theatre", "O"),
+            ("was", "O"),
+            ("in", "O"),
+            ("London", "B-LOC"),
+            (".", "O"),
+        ]
+
+        result_2 = nertagger.tag("UN headquarters are in NY , USA .".split())
+        expected_2 = [
+            ("UN", "B-ORG"),
+            ("headquarters", "O"),
+            ("are", "O"),
+            ("in", "O"),
+            ("NY", "B-LOC"),
+            (",", "O"),
+            ("USA", "B-LOC"),
+            (".", "O"),
+        ]
+        self.assertEqual(result_1, expected_1)
+        self.assertEqual(result_2, expected_2)