applied-ai-018 commited on
Commit
738b6e4
·
verified ·
1 Parent(s): 7ea6729

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__init__.py +0 -0
  2. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_aline.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_bllip.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_brill.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_cfd_mutation.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_cfg2chomsky.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_chunk.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_classify.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_collocations.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_concordance.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corenlp.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corpora.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corpus_views.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_data.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_disagreement.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_distance.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_downloader.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_freqdist.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_hmm.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_json2csv_corpus.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_json_serialization.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_metrics.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_naivebayes.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_nombank.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pl196x.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pos_tag.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_ribes.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_rte_classify.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_seekable_unicode_stream_reader.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_senna.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_stem.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tag.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tgrep.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tokenize.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_twitter_auth.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_util.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_wordnet.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_aline.py +48 -0
  40. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_brill.py +34 -0
  41. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_cfd_mutation.py +39 -0
  42. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_cfg2chomsky.py +49 -0
  43. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_chunk.py +85 -0
  44. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_classify.py +49 -0
  45. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_concordance.py +98 -0
  46. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_corenlp.py +1436 -0
  47. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_corpora.py +274 -0
  48. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_data.py +15 -0
  49. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_disagreement.py +144 -0
  50. llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_distance.py +129 -0
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (187 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_aline.cpython-310.pyc ADDED
Binary file (1.13 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_bllip.cpython-310.pyc ADDED
Binary file (1.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_brill.cpython-310.pyc ADDED
Binary file (1.31 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_cfd_mutation.cpython-310.pyc ADDED
Binary file (1.45 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_cfg2chomsky.cpython-310.pyc ADDED
Binary file (1.64 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_chunk.cpython-310.pyc ADDED
Binary file (1.94 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_classify.cpython-310.pyc ADDED
Binary file (1.48 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_collocations.cpython-310.pyc ADDED
Binary file (2.62 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_concordance.cpython-310.pyc ADDED
Binary file (5.12 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corenlp.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corpora.cpython-310.pyc ADDED
Binary file (7.51 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corpus_views.cpython-310.pyc ADDED
Binary file (1.67 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_data.cpython-310.pyc ADDED
Binary file (773 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_disagreement.cpython-310.pyc ADDED
Binary file (2.55 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_distance.cpython-310.pyc ADDED
Binary file (2.21 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_downloader.cpython-310.pyc ADDED
Binary file (931 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_freqdist.cpython-310.pyc ADDED
Binary file (465 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_hmm.cpython-310.pyc ADDED
Binary file (2.11 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_json2csv_corpus.cpython-310.pyc ADDED
Binary file (5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_json_serialization.cpython-310.pyc ADDED
Binary file (3.43 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_metrics.cpython-310.pyc ADDED
Binary file (1.72 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_naivebayes.cpython-310.pyc ADDED
Binary file (935 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_nombank.cpython-310.pyc ADDED
Binary file (1.37 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pl196x.cpython-310.pyc ADDED
Binary file (820 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pos_tag.cpython-310.pyc ADDED
Binary file (2.37 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_ribes.cpython-310.pyc ADDED
Binary file (2.26 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_rte_classify.cpython-310.pyc ADDED
Binary file (3.18 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_seekable_unicode_stream_reader.cpython-310.pyc ADDED
Binary file (2.04 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_senna.cpython-310.pyc ADDED
Binary file (3.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_stem.cpython-310.pyc ADDED
Binary file (5.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tag.cpython-310.pyc ADDED
Binary file (747 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tgrep.cpython-310.pyc ADDED
Binary file (20.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tokenize.cpython-310.pyc ADDED
Binary file (18.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_twitter_auth.cpython-310.pyc ADDED
Binary file (2.37 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_util.cpython-310.pyc ADDED
Binary file (1.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_wordnet.cpython-310.pyc ADDED
Binary file (7.41 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_aline.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Test Aline algorithm for aligning phonetic sequences
3
+ """
4
+ from nltk.metrics import aline
5
+
6
+
7
+ def test_aline():
8
+ result = aline.align("θin", "tenwis")
9
+ expected = [[("θ", "t"), ("i", "e"), ("n", "n")]]
10
+
11
+ assert result == expected
12
+
13
+ result = aline.align("jo", "ʒə")
14
+ expected = [[("j", "ʒ"), ("o", "ə")]]
15
+
16
+ assert result == expected
17
+
18
+ result = aline.align("pematesiweni", "pematesewen")
19
+ expected = [
20
+ [
21
+ ("p", "p"),
22
+ ("e", "e"),
23
+ ("m", "m"),
24
+ ("a", "a"),
25
+ ("t", "t"),
26
+ ("e", "e"),
27
+ ("s", "s"),
28
+ ("i", "e"),
29
+ ("w", "w"),
30
+ ("e", "e"),
31
+ ("n", "n"),
32
+ ]
33
+ ]
34
+
35
+ assert result == expected
36
+
37
+ result = aline.align("tuwθ", "dentis")
38
+ expected = [[("t", "t"), ("u", "i"), ("w", "-"), ("θ", "s")]]
39
+
40
+ assert result == expected
41
+
42
+
43
+ def test_aline_delta():
44
+ """
45
+ Test aline for computing the difference between two segments
46
+ """
47
+ assert aline.delta("p", "q") == 20.0
48
+ assert aline.delta("a", "A") == 0.0
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_brill.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests for Brill tagger.
3
+ """
4
+
5
+ import unittest
6
+
7
+ from nltk.corpus import treebank
8
+ from nltk.tag import UnigramTagger, brill, brill_trainer
9
+ from nltk.tbl import demo
10
+
11
+
12
+ class TestBrill(unittest.TestCase):
13
+ def test_pos_template(self):
14
+ train_sents = treebank.tagged_sents()[:1000]
15
+ tagger = UnigramTagger(train_sents)
16
+ trainer = brill_trainer.BrillTaggerTrainer(
17
+ tagger, [brill.Template(brill.Pos([-1]))]
18
+ )
19
+ brill_tagger = trainer.train(train_sents)
20
+ # Example from https://github.com/nltk/nltk/issues/769
21
+ result = brill_tagger.tag("This is a foo bar sentence".split())
22
+ expected = [
23
+ ("This", "DT"),
24
+ ("is", "VBZ"),
25
+ ("a", "DT"),
26
+ ("foo", None),
27
+ ("bar", "NN"),
28
+ ("sentence", None),
29
+ ]
30
+ self.assertEqual(result, expected)
31
+
32
+ @unittest.skip("Should be tested in __main__ of nltk.tbl.demo")
33
+ def test_brill_demo(self):
34
+ demo()
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_cfd_mutation.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import unittest
2
+
3
+ import pytest
4
+
5
+ from nltk import ConditionalFreqDist, tokenize
6
+
7
+
8
+ class TestEmptyCondFreq(unittest.TestCase):
9
+ def test_tabulate(self):
10
+ empty = ConditionalFreqDist()
11
+ self.assertEqual(empty.conditions(), [])
12
+ with pytest.raises(ValueError):
13
+ empty.tabulate(conditions="BUG") # nonexistent keys shouldn't be added
14
+ self.assertEqual(empty.conditions(), [])
15
+
16
+ def test_plot(self):
17
+ empty = ConditionalFreqDist()
18
+ self.assertEqual(empty.conditions(), [])
19
+ empty.plot(conditions=["BUG"]) # nonexistent keys shouldn't be added
20
+ self.assertEqual(empty.conditions(), [])
21
+
22
+ def test_increment(self):
23
+ # make sure that we can still mutate cfd normally
24
+ text = "cow cat mouse cat tiger"
25
+ cfd = ConditionalFreqDist()
26
+
27
+ # create cfd with word length as condition
28
+ for word in tokenize.word_tokenize(text):
29
+ condition = len(word)
30
+ cfd[condition][word] += 1
31
+
32
+ self.assertEqual(cfd.conditions(), [3, 5])
33
+
34
+ # incrementing previously unseen key is still possible
35
+ cfd[2]["hi"] += 1
36
+ self.assertCountEqual(cfd.conditions(), [3, 5, 2]) # new condition added
37
+ self.assertEqual(
38
+ cfd[2]["hi"], 1
39
+ ) # key's frequency incremented from 0 (unseen) to 1
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_cfg2chomsky.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import unittest
2
+
3
+ import nltk
4
+ from nltk.grammar import CFG
5
+
6
+
7
+ class ChomskyNormalFormForCFGTest(unittest.TestCase):
8
+ def test_simple(self):
9
+ grammar = CFG.fromstring(
10
+ """
11
+ S -> NP VP
12
+ PP -> P NP
13
+ NP -> Det N | NP PP P
14
+ VP -> V NP | VP PP
15
+ VP -> Det
16
+ Det -> 'a' | 'the'
17
+ N -> 'dog' | 'cat'
18
+ V -> 'chased' | 'sat'
19
+ P -> 'on' | 'in'
20
+ """
21
+ )
22
+ self.assertFalse(grammar.is_flexible_chomsky_normal_form())
23
+ self.assertFalse(grammar.is_chomsky_normal_form())
24
+ grammar = grammar.chomsky_normal_form(flexible=True)
25
+ self.assertTrue(grammar.is_flexible_chomsky_normal_form())
26
+ self.assertFalse(grammar.is_chomsky_normal_form())
27
+
28
+ grammar2 = CFG.fromstring(
29
+ """
30
+ S -> NP VP
31
+ NP -> VP N P
32
+ VP -> P
33
+ N -> 'dog' | 'cat'
34
+ P -> 'on' | 'in'
35
+ """
36
+ )
37
+ self.assertFalse(grammar2.is_flexible_chomsky_normal_form())
38
+ self.assertFalse(grammar2.is_chomsky_normal_form())
39
+ grammar2 = grammar2.chomsky_normal_form()
40
+ self.assertTrue(grammar2.is_flexible_chomsky_normal_form())
41
+ self.assertTrue(grammar2.is_chomsky_normal_form())
42
+
43
+ def test_complex(self):
44
+ grammar = nltk.data.load("grammars/large_grammars/atis.cfg")
45
+ self.assertFalse(grammar.is_flexible_chomsky_normal_form())
46
+ self.assertFalse(grammar.is_chomsky_normal_form())
47
+ grammar = grammar.chomsky_normal_form(flexible=True)
48
+ self.assertTrue(grammar.is_flexible_chomsky_normal_form())
49
+ self.assertFalse(grammar.is_chomsky_normal_form())
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_chunk.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import unittest
2
+
3
+ from nltk import RegexpParser
4
+
5
+
6
+ class TestChunkRule(unittest.TestCase):
7
+ def test_tag_pattern2re_pattern_quantifier(self):
8
+ """Test for bug https://github.com/nltk/nltk/issues/1597
9
+
10
+ Ensures that curly bracket quantifiers can be used inside a chunk rule.
11
+ This type of quantifier has been used for the supplementary example
12
+ in https://www.nltk.org/book/ch07.html#exploring-text-corpora.
13
+ """
14
+ sent = [
15
+ ("The", "AT"),
16
+ ("September-October", "NP"),
17
+ ("term", "NN"),
18
+ ("jury", "NN"),
19
+ ("had", "HVD"),
20
+ ("been", "BEN"),
21
+ ("charged", "VBN"),
22
+ ("by", "IN"),
23
+ ("Fulton", "NP-TL"),
24
+ ("Superior", "JJ-TL"),
25
+ ("Court", "NN-TL"),
26
+ ("Judge", "NN-TL"),
27
+ ("Durwood", "NP"),
28
+ ("Pye", "NP"),
29
+ ("to", "TO"),
30
+ ("investigate", "VB"),
31
+ ("reports", "NNS"),
32
+ ("of", "IN"),
33
+ ("possible", "JJ"),
34
+ ("``", "``"),
35
+ ("irregularities", "NNS"),
36
+ ("''", "''"),
37
+ ("in", "IN"),
38
+ ("the", "AT"),
39
+ ("hard-fought", "JJ"),
40
+ ("primary", "NN"),
41
+ ("which", "WDT"),
42
+ ("was", "BEDZ"),
43
+ ("won", "VBN"),
44
+ ("by", "IN"),
45
+ ("Mayor-nominate", "NN-TL"),
46
+ ("Ivan", "NP"),
47
+ ("Allen", "NP"),
48
+ ("Jr.", "NP"),
49
+ (".", "."),
50
+ ] # source: brown corpus
51
+ cp = RegexpParser("CHUNK: {<N.*>{4,}}")
52
+ tree = cp.parse(sent)
53
+ assert (
54
+ tree.pformat()
55
+ == """(S
56
+ The/AT
57
+ September-October/NP
58
+ term/NN
59
+ jury/NN
60
+ had/HVD
61
+ been/BEN
62
+ charged/VBN
63
+ by/IN
64
+ Fulton/NP-TL
65
+ Superior/JJ-TL
66
+ (CHUNK Court/NN-TL Judge/NN-TL Durwood/NP Pye/NP)
67
+ to/TO
68
+ investigate/VB
69
+ reports/NNS
70
+ of/IN
71
+ possible/JJ
72
+ ``/``
73
+ irregularities/NNS
74
+ ''/''
75
+ in/IN
76
+ the/AT
77
+ hard-fought/JJ
78
+ primary/NN
79
+ which/WDT
80
+ was/BEDZ
81
+ won/VBN
82
+ by/IN
83
+ (CHUNK Mayor-nominate/NN-TL Ivan/NP Allen/NP Jr./NP)
84
+ ./.)"""
85
+ )
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_classify.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Unit tests for nltk.classify. See also: nltk/test/classify.doctest
3
+ """
4
+ import pytest
5
+
6
+ from nltk import classify
7
+
8
+ TRAIN = [
9
+ (dict(a=1, b=1, c=1), "y"),
10
+ (dict(a=1, b=1, c=1), "x"),
11
+ (dict(a=1, b=1, c=0), "y"),
12
+ (dict(a=0, b=1, c=1), "x"),
13
+ (dict(a=0, b=1, c=1), "y"),
14
+ (dict(a=0, b=0, c=1), "y"),
15
+ (dict(a=0, b=1, c=0), "x"),
16
+ (dict(a=0, b=0, c=0), "x"),
17
+ (dict(a=0, b=1, c=1), "y"),
18
+ ]
19
+
20
+ TEST = [
21
+ (dict(a=1, b=0, c=1)), # unseen
22
+ (dict(a=1, b=0, c=0)), # unseen
23
+ (dict(a=0, b=1, c=1)), # seen 3 times, labels=y,y,x
24
+ (dict(a=0, b=1, c=0)), # seen 1 time, label=x
25
+ ]
26
+
27
+ RESULTS = [(0.16, 0.84), (0.46, 0.54), (0.41, 0.59), (0.76, 0.24)]
28
+
29
+
30
+ def assert_classifier_correct(algorithm):
31
+ try:
32
+ classifier = classify.MaxentClassifier.train(
33
+ TRAIN, algorithm, trace=0, max_iter=1000
34
+ )
35
+ except (LookupError, AttributeError) as e:
36
+ pytest.skip(str(e))
37
+
38
+ for (px, py), featureset in zip(RESULTS, TEST):
39
+ pdist = classifier.prob_classify(featureset)
40
+ assert abs(pdist.prob("x") - px) < 1e-2, (pdist.prob("x"), px)
41
+ assert abs(pdist.prob("y") - py) < 1e-2, (pdist.prob("y"), py)
42
+
43
+
44
+ def test_megam():
45
+ assert_classifier_correct("MEGAM")
46
+
47
+
48
+ def test_tadm():
49
+ assert_classifier_correct("TADM")
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_concordance.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ import sys
3
+ import unittest
4
+ from io import StringIO
5
+
6
+ from nltk.corpus import gutenberg
7
+ from nltk.text import Text
8
+
9
+
10
+ @contextlib.contextmanager
11
+ def stdout_redirect(where):
12
+ sys.stdout = where
13
+ try:
14
+ yield where
15
+ finally:
16
+ sys.stdout = sys.__stdout__
17
+
18
+
19
+ class TestConcordance(unittest.TestCase):
20
+ """Text constructed using: https://www.nltk.org/book/ch01.html"""
21
+
22
+ @classmethod
23
+ def setUpClass(cls):
24
+ cls.corpus = gutenberg.words("melville-moby_dick.txt")
25
+
26
+ @classmethod
27
+ def tearDownClass(cls):
28
+ pass
29
+
30
+ def setUp(self):
31
+ self.text = Text(TestConcordance.corpus)
32
+ self.query = "monstrous"
33
+ self.maxDiff = None
34
+ self.list_out = [
35
+ "ong the former , one was of a most monstrous size . ... This came towards us , ",
36
+ 'ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r',
37
+ "ll over with a heathenish array of monstrous clubs and spears . Some were thick",
38
+ "d as you gazed , and wondered what monstrous cannibal and savage could ever hav",
39
+ "that has survived the flood ; most monstrous and most mountainous ! That Himmal",
40
+ "they might scout at Moby Dick as a monstrous fable , or still worse and more de",
41
+ "th of Radney .'\" CHAPTER 55 Of the Monstrous Pictures of Whales . I shall ere l",
42
+ "ing Scenes . In connexion with the monstrous pictures of whales , I am strongly",
43
+ "ere to enter upon those still more monstrous stories of them which are to be fo",
44
+ "ght have been rummaged out of this monstrous cabinet there is no telling . But ",
45
+ "of Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead u",
46
+ ]
47
+
48
+ def tearDown(self):
49
+ pass
50
+
51
+ def test_concordance_list(self):
52
+ concordance_out = self.text.concordance_list(self.query)
53
+ self.assertEqual(self.list_out, [c.line for c in concordance_out])
54
+
55
+ def test_concordance_width(self):
56
+ list_out = [
57
+ "monstrous",
58
+ "monstrous",
59
+ "monstrous",
60
+ "monstrous",
61
+ "monstrous",
62
+ "monstrous",
63
+ "Monstrous",
64
+ "monstrous",
65
+ "monstrous",
66
+ "monstrous",
67
+ "monstrous",
68
+ ]
69
+
70
+ concordance_out = self.text.concordance_list(self.query, width=0)
71
+ self.assertEqual(list_out, [c.query for c in concordance_out])
72
+
73
+ def test_concordance_lines(self):
74
+ concordance_out = self.text.concordance_list(self.query, lines=3)
75
+ self.assertEqual(self.list_out[:3], [c.line for c in concordance_out])
76
+
77
+ def test_concordance_print(self):
78
+ print_out = """Displaying 11 of 11 matches:
79
+ ong the former , one was of a most monstrous size . ... This came towards us ,
80
+ ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r
81
+ ll over with a heathenish array of monstrous clubs and spears . Some were thick
82
+ d as you gazed , and wondered what monstrous cannibal and savage could ever hav
83
+ that has survived the flood ; most monstrous and most mountainous ! That Himmal
84
+ they might scout at Moby Dick as a monstrous fable , or still worse and more de
85
+ th of Radney .'" CHAPTER 55 Of the Monstrous Pictures of Whales . I shall ere l
86
+ ing Scenes . In connexion with the monstrous pictures of whales , I am strongly
87
+ ere to enter upon those still more monstrous stories of them which are to be fo
88
+ ght have been rummaged out of this monstrous cabinet there is no telling . But
89
+ of Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead u
90
+ """
91
+
92
+ with stdout_redirect(StringIO()) as stdout:
93
+ self.text.concordance(self.query)
94
+
95
+ def strip_space(raw_str):
96
+ return raw_str.replace(" ", "")
97
+
98
+ self.assertEqual(strip_space(print_out), strip_space(stdout.getvalue()))
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_corenlp.py ADDED
@@ -0,0 +1,1436 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Mock test for Stanford CoreNLP wrappers.
3
+ """
4
+
5
+ from unittest import TestCase
6
+ from unittest.mock import MagicMock
7
+
8
+ import pytest
9
+
10
+ from nltk.parse import corenlp
11
+ from nltk.tree import Tree
12
+
13
+
14
+ def setup_module(module):
15
+ global server
16
+
17
+ try:
18
+ server = corenlp.CoreNLPServer(port=9000)
19
+ except LookupError:
20
+ pytest.skip("Could not instantiate CoreNLPServer.")
21
+
22
+ try:
23
+ server.start()
24
+ except corenlp.CoreNLPServerError as e:
25
+ pytest.skip(
26
+ "Skipping CoreNLP tests because the server could not be started. "
27
+ "Make sure that the 9000 port is free. "
28
+ "{}".format(e.strerror)
29
+ )
30
+
31
+
32
+ def teardown_module(module):
33
+ server.stop()
34
+
35
+
36
+ class TestTokenizerAPI(TestCase):
37
+ def test_tokenize(self):
38
+ corenlp_tokenizer = corenlp.CoreNLPParser()
39
+
40
+ api_return_value = {
41
+ "sentences": [
42
+ {
43
+ "index": 0,
44
+ "tokens": [
45
+ {
46
+ "after": " ",
47
+ "before": "",
48
+ "characterOffsetBegin": 0,
49
+ "characterOffsetEnd": 4,
50
+ "index": 1,
51
+ "originalText": "Good",
52
+ "word": "Good",
53
+ },
54
+ {
55
+ "after": " ",
56
+ "before": " ",
57
+ "characterOffsetBegin": 5,
58
+ "characterOffsetEnd": 12,
59
+ "index": 2,
60
+ "originalText": "muffins",
61
+ "word": "muffins",
62
+ },
63
+ {
64
+ "after": " ",
65
+ "before": " ",
66
+ "characterOffsetBegin": 13,
67
+ "characterOffsetEnd": 17,
68
+ "index": 3,
69
+ "originalText": "cost",
70
+ "word": "cost",
71
+ },
72
+ {
73
+ "after": "",
74
+ "before": " ",
75
+ "characterOffsetBegin": 18,
76
+ "characterOffsetEnd": 19,
77
+ "index": 4,
78
+ "originalText": "$",
79
+ "word": "$",
80
+ },
81
+ {
82
+ "after": "\n",
83
+ "before": "",
84
+ "characterOffsetBegin": 19,
85
+ "characterOffsetEnd": 23,
86
+ "index": 5,
87
+ "originalText": "3.88",
88
+ "word": "3.88",
89
+ },
90
+ {
91
+ "after": " ",
92
+ "before": "\n",
93
+ "characterOffsetBegin": 24,
94
+ "characterOffsetEnd": 26,
95
+ "index": 6,
96
+ "originalText": "in",
97
+ "word": "in",
98
+ },
99
+ {
100
+ "after": " ",
101
+ "before": " ",
102
+ "characterOffsetBegin": 27,
103
+ "characterOffsetEnd": 30,
104
+ "index": 7,
105
+ "originalText": "New",
106
+ "word": "New",
107
+ },
108
+ {
109
+ "after": "",
110
+ "before": " ",
111
+ "characterOffsetBegin": 31,
112
+ "characterOffsetEnd": 35,
113
+ "index": 8,
114
+ "originalText": "York",
115
+ "word": "York",
116
+ },
117
+ {
118
+ "after": " ",
119
+ "before": "",
120
+ "characterOffsetBegin": 35,
121
+ "characterOffsetEnd": 36,
122
+ "index": 9,
123
+ "originalText": ".",
124
+ "word": ".",
125
+ },
126
+ ],
127
+ },
128
+ {
129
+ "index": 1,
130
+ "tokens": [
131
+ {
132
+ "after": " ",
133
+ "before": " ",
134
+ "characterOffsetBegin": 38,
135
+ "characterOffsetEnd": 44,
136
+ "index": 1,
137
+ "originalText": "Please",
138
+ "word": "Please",
139
+ },
140
+ {
141
+ "after": " ",
142
+ "before": " ",
143
+ "characterOffsetBegin": 45,
144
+ "characterOffsetEnd": 48,
145
+ "index": 2,
146
+ "originalText": "buy",
147
+ "word": "buy",
148
+ },
149
+ {
150
+ "after": "\n",
151
+ "before": " ",
152
+ "characterOffsetBegin": 49,
153
+ "characterOffsetEnd": 51,
154
+ "index": 3,
155
+ "originalText": "me",
156
+ "word": "me",
157
+ },
158
+ {
159
+ "after": " ",
160
+ "before": "\n",
161
+ "characterOffsetBegin": 52,
162
+ "characterOffsetEnd": 55,
163
+ "index": 4,
164
+ "originalText": "two",
165
+ "word": "two",
166
+ },
167
+ {
168
+ "after": " ",
169
+ "before": " ",
170
+ "characterOffsetBegin": 56,
171
+ "characterOffsetEnd": 58,
172
+ "index": 5,
173
+ "originalText": "of",
174
+ "word": "of",
175
+ },
176
+ {
177
+ "after": "",
178
+ "before": " ",
179
+ "characterOffsetBegin": 59,
180
+ "characterOffsetEnd": 63,
181
+ "index": 6,
182
+ "originalText": "them",
183
+ "word": "them",
184
+ },
185
+ {
186
+ "after": "\n",
187
+ "before": "",
188
+ "characterOffsetBegin": 63,
189
+ "characterOffsetEnd": 64,
190
+ "index": 7,
191
+ "originalText": ".",
192
+ "word": ".",
193
+ },
194
+ ],
195
+ },
196
+ {
197
+ "index": 2,
198
+ "tokens": [
199
+ {
200
+ "after": "",
201
+ "before": "\n",
202
+ "characterOffsetBegin": 65,
203
+ "characterOffsetEnd": 71,
204
+ "index": 1,
205
+ "originalText": "Thanks",
206
+ "word": "Thanks",
207
+ },
208
+ {
209
+ "after": "",
210
+ "before": "",
211
+ "characterOffsetBegin": 71,
212
+ "characterOffsetEnd": 72,
213
+ "index": 2,
214
+ "originalText": ".",
215
+ "word": ".",
216
+ },
217
+ ],
218
+ },
219
+ ]
220
+ }
221
+ corenlp_tokenizer.api_call = MagicMock(return_value=api_return_value)
222
+
223
+ input_string = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks."
224
+
225
+ expected_output = [
226
+ "Good",
227
+ "muffins",
228
+ "cost",
229
+ "$",
230
+ "3.88",
231
+ "in",
232
+ "New",
233
+ "York",
234
+ ".",
235
+ "Please",
236
+ "buy",
237
+ "me",
238
+ "two",
239
+ "of",
240
+ "them",
241
+ ".",
242
+ "Thanks",
243
+ ".",
244
+ ]
245
+
246
+ tokenized_output = list(corenlp_tokenizer.tokenize(input_string))
247
+
248
+ corenlp_tokenizer.api_call.assert_called_once_with(
249
+ "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks.",
250
+ properties={"annotators": "tokenize,ssplit"},
251
+ )
252
+ self.assertEqual(expected_output, tokenized_output)
253
+
254
+
255
+ class TestTaggerAPI(TestCase):
256
+ def test_pos_tagger(self):
257
+ corenlp_tagger = corenlp.CoreNLPParser(tagtype="pos")
258
+
259
+ api_return_value = {
260
+ "sentences": [
261
+ {
262
+ "basicDependencies": [
263
+ {
264
+ "dep": "ROOT",
265
+ "dependent": 1,
266
+ "dependentGloss": "What",
267
+ "governor": 0,
268
+ "governorGloss": "ROOT",
269
+ },
270
+ {
271
+ "dep": "cop",
272
+ "dependent": 2,
273
+ "dependentGloss": "is",
274
+ "governor": 1,
275
+ "governorGloss": "What",
276
+ },
277
+ {
278
+ "dep": "det",
279
+ "dependent": 3,
280
+ "dependentGloss": "the",
281
+ "governor": 4,
282
+ "governorGloss": "airspeed",
283
+ },
284
+ {
285
+ "dep": "nsubj",
286
+ "dependent": 4,
287
+ "dependentGloss": "airspeed",
288
+ "governor": 1,
289
+ "governorGloss": "What",
290
+ },
291
+ {
292
+ "dep": "case",
293
+ "dependent": 5,
294
+ "dependentGloss": "of",
295
+ "governor": 8,
296
+ "governorGloss": "swallow",
297
+ },
298
+ {
299
+ "dep": "det",
300
+ "dependent": 6,
301
+ "dependentGloss": "an",
302
+ "governor": 8,
303
+ "governorGloss": "swallow",
304
+ },
305
+ {
306
+ "dep": "compound",
307
+ "dependent": 7,
308
+ "dependentGloss": "unladen",
309
+ "governor": 8,
310
+ "governorGloss": "swallow",
311
+ },
312
+ {
313
+ "dep": "nmod",
314
+ "dependent": 8,
315
+ "dependentGloss": "swallow",
316
+ "governor": 4,
317
+ "governorGloss": "airspeed",
318
+ },
319
+ {
320
+ "dep": "punct",
321
+ "dependent": 9,
322
+ "dependentGloss": "?",
323
+ "governor": 1,
324
+ "governorGloss": "What",
325
+ },
326
+ ],
327
+ "enhancedDependencies": [
328
+ {
329
+ "dep": "ROOT",
330
+ "dependent": 1,
331
+ "dependentGloss": "What",
332
+ "governor": 0,
333
+ "governorGloss": "ROOT",
334
+ },
335
+ {
336
+ "dep": "cop",
337
+ "dependent": 2,
338
+ "dependentGloss": "is",
339
+ "governor": 1,
340
+ "governorGloss": "What",
341
+ },
342
+ {
343
+ "dep": "det",
344
+ "dependent": 3,
345
+ "dependentGloss": "the",
346
+ "governor": 4,
347
+ "governorGloss": "airspeed",
348
+ },
349
+ {
350
+ "dep": "nsubj",
351
+ "dependent": 4,
352
+ "dependentGloss": "airspeed",
353
+ "governor": 1,
354
+ "governorGloss": "What",
355
+ },
356
+ {
357
+ "dep": "case",
358
+ "dependent": 5,
359
+ "dependentGloss": "of",
360
+ "governor": 8,
361
+ "governorGloss": "swallow",
362
+ },
363
+ {
364
+ "dep": "det",
365
+ "dependent": 6,
366
+ "dependentGloss": "an",
367
+ "governor": 8,
368
+ "governorGloss": "swallow",
369
+ },
370
+ {
371
+ "dep": "compound",
372
+ "dependent": 7,
373
+ "dependentGloss": "unladen",
374
+ "governor": 8,
375
+ "governorGloss": "swallow",
376
+ },
377
+ {
378
+ "dep": "nmod:of",
379
+ "dependent": 8,
380
+ "dependentGloss": "swallow",
381
+ "governor": 4,
382
+ "governorGloss": "airspeed",
383
+ },
384
+ {
385
+ "dep": "punct",
386
+ "dependent": 9,
387
+ "dependentGloss": "?",
388
+ "governor": 1,
389
+ "governorGloss": "What",
390
+ },
391
+ ],
392
+ "enhancedPlusPlusDependencies": [
393
+ {
394
+ "dep": "ROOT",
395
+ "dependent": 1,
396
+ "dependentGloss": "What",
397
+ "governor": 0,
398
+ "governorGloss": "ROOT",
399
+ },
400
+ {
401
+ "dep": "cop",
402
+ "dependent": 2,
403
+ "dependentGloss": "is",
404
+ "governor": 1,
405
+ "governorGloss": "What",
406
+ },
407
+ {
408
+ "dep": "det",
409
+ "dependent": 3,
410
+ "dependentGloss": "the",
411
+ "governor": 4,
412
+ "governorGloss": "airspeed",
413
+ },
414
+ {
415
+ "dep": "nsubj",
416
+ "dependent": 4,
417
+ "dependentGloss": "airspeed",
418
+ "governor": 1,
419
+ "governorGloss": "What",
420
+ },
421
+ {
422
+ "dep": "case",
423
+ "dependent": 5,
424
+ "dependentGloss": "of",
425
+ "governor": 8,
426
+ "governorGloss": "swallow",
427
+ },
428
+ {
429
+ "dep": "det",
430
+ "dependent": 6,
431
+ "dependentGloss": "an",
432
+ "governor": 8,
433
+ "governorGloss": "swallow",
434
+ },
435
+ {
436
+ "dep": "compound",
437
+ "dependent": 7,
438
+ "dependentGloss": "unladen",
439
+ "governor": 8,
440
+ "governorGloss": "swallow",
441
+ },
442
+ {
443
+ "dep": "nmod:of",
444
+ "dependent": 8,
445
+ "dependentGloss": "swallow",
446
+ "governor": 4,
447
+ "governorGloss": "airspeed",
448
+ },
449
+ {
450
+ "dep": "punct",
451
+ "dependent": 9,
452
+ "dependentGloss": "?",
453
+ "governor": 1,
454
+ "governorGloss": "What",
455
+ },
456
+ ],
457
+ "index": 0,
458
+ "parse": "(ROOT\n (SBARQ\n (WHNP (WP What))\n (SQ (VBZ is)\n (NP\n (NP (DT the) (NN airspeed))\n (PP (IN of)\n (NP (DT an) (NN unladen) (NN swallow)))))\n (. ?)))",
459
+ "tokens": [
460
+ {
461
+ "after": " ",
462
+ "before": "",
463
+ "characterOffsetBegin": 0,
464
+ "characterOffsetEnd": 4,
465
+ "index": 1,
466
+ "lemma": "what",
467
+ "originalText": "What",
468
+ "pos": "WP",
469
+ "word": "What",
470
+ },
471
+ {
472
+ "after": " ",
473
+ "before": " ",
474
+ "characterOffsetBegin": 5,
475
+ "characterOffsetEnd": 7,
476
+ "index": 2,
477
+ "lemma": "be",
478
+ "originalText": "is",
479
+ "pos": "VBZ",
480
+ "word": "is",
481
+ },
482
+ {
483
+ "after": " ",
484
+ "before": " ",
485
+ "characterOffsetBegin": 8,
486
+ "characterOffsetEnd": 11,
487
+ "index": 3,
488
+ "lemma": "the",
489
+ "originalText": "the",
490
+ "pos": "DT",
491
+ "word": "the",
492
+ },
493
+ {
494
+ "after": " ",
495
+ "before": " ",
496
+ "characterOffsetBegin": 12,
497
+ "characterOffsetEnd": 20,
498
+ "index": 4,
499
+ "lemma": "airspeed",
500
+ "originalText": "airspeed",
501
+ "pos": "NN",
502
+ "word": "airspeed",
503
+ },
504
+ {
505
+ "after": " ",
506
+ "before": " ",
507
+ "characterOffsetBegin": 21,
508
+ "characterOffsetEnd": 23,
509
+ "index": 5,
510
+ "lemma": "of",
511
+ "originalText": "of",
512
+ "pos": "IN",
513
+ "word": "of",
514
+ },
515
+ {
516
+ "after": " ",
517
+ "before": " ",
518
+ "characterOffsetBegin": 24,
519
+ "characterOffsetEnd": 26,
520
+ "index": 6,
521
+ "lemma": "a",
522
+ "originalText": "an",
523
+ "pos": "DT",
524
+ "word": "an",
525
+ },
526
+ {
527
+ "after": " ",
528
+ "before": " ",
529
+ "characterOffsetBegin": 27,
530
+ "characterOffsetEnd": 34,
531
+ "index": 7,
532
+ "lemma": "unladen",
533
+ "originalText": "unladen",
534
+ "pos": "JJ",
535
+ "word": "unladen",
536
+ },
537
+ {
538
+ "after": " ",
539
+ "before": " ",
540
+ "characterOffsetBegin": 35,
541
+ "characterOffsetEnd": 42,
542
+ "index": 8,
543
+ "lemma": "swallow",
544
+ "originalText": "swallow",
545
+ "pos": "VB",
546
+ "word": "swallow",
547
+ },
548
+ {
549
+ "after": "",
550
+ "before": " ",
551
+ "characterOffsetBegin": 43,
552
+ "characterOffsetEnd": 44,
553
+ "index": 9,
554
+ "lemma": "?",
555
+ "originalText": "?",
556
+ "pos": ".",
557
+ "word": "?",
558
+ },
559
+ ],
560
+ }
561
+ ]
562
+ }
563
+ corenlp_tagger.api_call = MagicMock(return_value=api_return_value)
564
+
565
+ input_tokens = "What is the airspeed of an unladen swallow ?".split()
566
+ expected_output = [
567
+ ("What", "WP"),
568
+ ("is", "VBZ"),
569
+ ("the", "DT"),
570
+ ("airspeed", "NN"),
571
+ ("of", "IN"),
572
+ ("an", "DT"),
573
+ ("unladen", "JJ"),
574
+ ("swallow", "VB"),
575
+ ("?", "."),
576
+ ]
577
+ tagged_output = corenlp_tagger.tag(input_tokens)
578
+
579
+ corenlp_tagger.api_call.assert_called_once_with(
580
+ "What is the airspeed of an unladen swallow ?",
581
+ properties={
582
+ "ssplit.isOneSentence": "true",
583
+ "annotators": "tokenize,ssplit,pos",
584
+ },
585
+ )
586
+ self.assertEqual(expected_output, tagged_output)
587
+
588
+ def test_ner_tagger(self):
589
+ corenlp_tagger = corenlp.CoreNLPParser(tagtype="ner")
590
+
591
+ api_return_value = {
592
+ "sentences": [
593
+ {
594
+ "index": 0,
595
+ "tokens": [
596
+ {
597
+ "after": " ",
598
+ "before": "",
599
+ "characterOffsetBegin": 0,
600
+ "characterOffsetEnd": 4,
601
+ "index": 1,
602
+ "lemma": "Rami",
603
+ "ner": "PERSON",
604
+ "originalText": "Rami",
605
+ "pos": "NNP",
606
+ "word": "Rami",
607
+ },
608
+ {
609
+ "after": " ",
610
+ "before": " ",
611
+ "characterOffsetBegin": 5,
612
+ "characterOffsetEnd": 8,
613
+ "index": 2,
614
+ "lemma": "Eid",
615
+ "ner": "PERSON",
616
+ "originalText": "Eid",
617
+ "pos": "NNP",
618
+ "word": "Eid",
619
+ },
620
+ {
621
+ "after": " ",
622
+ "before": " ",
623
+ "characterOffsetBegin": 9,
624
+ "characterOffsetEnd": 11,
625
+ "index": 3,
626
+ "lemma": "be",
627
+ "ner": "O",
628
+ "originalText": "is",
629
+ "pos": "VBZ",
630
+ "word": "is",
631
+ },
632
+ {
633
+ "after": " ",
634
+ "before": " ",
635
+ "characterOffsetBegin": 12,
636
+ "characterOffsetEnd": 20,
637
+ "index": 4,
638
+ "lemma": "study",
639
+ "ner": "O",
640
+ "originalText": "studying",
641
+ "pos": "VBG",
642
+ "word": "studying",
643
+ },
644
+ {
645
+ "after": " ",
646
+ "before": " ",
647
+ "characterOffsetBegin": 21,
648
+ "characterOffsetEnd": 23,
649
+ "index": 5,
650
+ "lemma": "at",
651
+ "ner": "O",
652
+ "originalText": "at",
653
+ "pos": "IN",
654
+ "word": "at",
655
+ },
656
+ {
657
+ "after": " ",
658
+ "before": " ",
659
+ "characterOffsetBegin": 24,
660
+ "characterOffsetEnd": 29,
661
+ "index": 6,
662
+ "lemma": "Stony",
663
+ "ner": "ORGANIZATION",
664
+ "originalText": "Stony",
665
+ "pos": "NNP",
666
+ "word": "Stony",
667
+ },
668
+ {
669
+ "after": " ",
670
+ "before": " ",
671
+ "characterOffsetBegin": 30,
672
+ "characterOffsetEnd": 35,
673
+ "index": 7,
674
+ "lemma": "Brook",
675
+ "ner": "ORGANIZATION",
676
+ "originalText": "Brook",
677
+ "pos": "NNP",
678
+ "word": "Brook",
679
+ },
680
+ {
681
+ "after": " ",
682
+ "before": " ",
683
+ "characterOffsetBegin": 36,
684
+ "characterOffsetEnd": 46,
685
+ "index": 8,
686
+ "lemma": "University",
687
+ "ner": "ORGANIZATION",
688
+ "originalText": "University",
689
+ "pos": "NNP",
690
+ "word": "University",
691
+ },
692
+ {
693
+ "after": " ",
694
+ "before": " ",
695
+ "characterOffsetBegin": 47,
696
+ "characterOffsetEnd": 49,
697
+ "index": 9,
698
+ "lemma": "in",
699
+ "ner": "O",
700
+ "originalText": "in",
701
+ "pos": "IN",
702
+ "word": "in",
703
+ },
704
+ {
705
+ "after": "",
706
+ "before": " ",
707
+ "characterOffsetBegin": 50,
708
+ "characterOffsetEnd": 52,
709
+ "index": 10,
710
+ "lemma": "NY",
711
+ "ner": "O",
712
+ "originalText": "NY",
713
+ "pos": "NNP",
714
+ "word": "NY",
715
+ },
716
+ ],
717
+ }
718
+ ]
719
+ }
720
+
721
+ corenlp_tagger.api_call = MagicMock(return_value=api_return_value)
722
+
723
+ input_tokens = "Rami Eid is studying at Stony Brook University in NY".split()
724
+ expected_output = [
725
+ ("Rami", "PERSON"),
726
+ ("Eid", "PERSON"),
727
+ ("is", "O"),
728
+ ("studying", "O"),
729
+ ("at", "O"),
730
+ ("Stony", "ORGANIZATION"),
731
+ ("Brook", "ORGANIZATION"),
732
+ ("University", "ORGANIZATION"),
733
+ ("in", "O"),
734
+ ("NY", "O"),
735
+ ]
736
+ tagged_output = corenlp_tagger.tag(input_tokens)
737
+
738
+ corenlp_tagger.api_call.assert_called_once_with(
739
+ "Rami Eid is studying at Stony Brook University in NY",
740
+ properties={
741
+ "ssplit.isOneSentence": "true",
742
+ "annotators": "tokenize,ssplit,ner",
743
+ },
744
+ )
745
+ self.assertEqual(expected_output, tagged_output)
746
+
747
+ def test_unexpected_tagtype(self):
748
+ with self.assertRaises(ValueError):
749
+ corenlp_tagger = corenlp.CoreNLPParser(tagtype="test")
750
+
751
+
752
+ class TestParserAPI(TestCase):
753
+ def test_parse(self):
754
+ corenlp_parser = corenlp.CoreNLPParser()
755
+
756
+ api_return_value = {
757
+ "sentences": [
758
+ {
759
+ "basicDependencies": [
760
+ {
761
+ "dep": "ROOT",
762
+ "dependent": 4,
763
+ "dependentGloss": "fox",
764
+ "governor": 0,
765
+ "governorGloss": "ROOT",
766
+ },
767
+ {
768
+ "dep": "det",
769
+ "dependent": 1,
770
+ "dependentGloss": "The",
771
+ "governor": 4,
772
+ "governorGloss": "fox",
773
+ },
774
+ {
775
+ "dep": "amod",
776
+ "dependent": 2,
777
+ "dependentGloss": "quick",
778
+ "governor": 4,
779
+ "governorGloss": "fox",
780
+ },
781
+ {
782
+ "dep": "amod",
783
+ "dependent": 3,
784
+ "dependentGloss": "brown",
785
+ "governor": 4,
786
+ "governorGloss": "fox",
787
+ },
788
+ {
789
+ "dep": "dep",
790
+ "dependent": 5,
791
+ "dependentGloss": "jumps",
792
+ "governor": 4,
793
+ "governorGloss": "fox",
794
+ },
795
+ {
796
+ "dep": "case",
797
+ "dependent": 6,
798
+ "dependentGloss": "over",
799
+ "governor": 9,
800
+ "governorGloss": "dog",
801
+ },
802
+ {
803
+ "dep": "det",
804
+ "dependent": 7,
805
+ "dependentGloss": "the",
806
+ "governor": 9,
807
+ "governorGloss": "dog",
808
+ },
809
+ {
810
+ "dep": "amod",
811
+ "dependent": 8,
812
+ "dependentGloss": "lazy",
813
+ "governor": 9,
814
+ "governorGloss": "dog",
815
+ },
816
+ {
817
+ "dep": "nmod",
818
+ "dependent": 9,
819
+ "dependentGloss": "dog",
820
+ "governor": 5,
821
+ "governorGloss": "jumps",
822
+ },
823
+ ],
824
+ "enhancedDependencies": [
825
+ {
826
+ "dep": "ROOT",
827
+ "dependent": 4,
828
+ "dependentGloss": "fox",
829
+ "governor": 0,
830
+ "governorGloss": "ROOT",
831
+ },
832
+ {
833
+ "dep": "det",
834
+ "dependent": 1,
835
+ "dependentGloss": "The",
836
+ "governor": 4,
837
+ "governorGloss": "fox",
838
+ },
839
+ {
840
+ "dep": "amod",
841
+ "dependent": 2,
842
+ "dependentGloss": "quick",
843
+ "governor": 4,
844
+ "governorGloss": "fox",
845
+ },
846
+ {
847
+ "dep": "amod",
848
+ "dependent": 3,
849
+ "dependentGloss": "brown",
850
+ "governor": 4,
851
+ "governorGloss": "fox",
852
+ },
853
+ {
854
+ "dep": "dep",
855
+ "dependent": 5,
856
+ "dependentGloss": "jumps",
857
+ "governor": 4,
858
+ "governorGloss": "fox",
859
+ },
860
+ {
861
+ "dep": "case",
862
+ "dependent": 6,
863
+ "dependentGloss": "over",
864
+ "governor": 9,
865
+ "governorGloss": "dog",
866
+ },
867
+ {
868
+ "dep": "det",
869
+ "dependent": 7,
870
+ "dependentGloss": "the",
871
+ "governor": 9,
872
+ "governorGloss": "dog",
873
+ },
874
+ {
875
+ "dep": "amod",
876
+ "dependent": 8,
877
+ "dependentGloss": "lazy",
878
+ "governor": 9,
879
+ "governorGloss": "dog",
880
+ },
881
+ {
882
+ "dep": "nmod:over",
883
+ "dependent": 9,
884
+ "dependentGloss": "dog",
885
+ "governor": 5,
886
+ "governorGloss": "jumps",
887
+ },
888
+ ],
889
+ "enhancedPlusPlusDependencies": [
890
+ {
891
+ "dep": "ROOT",
892
+ "dependent": 4,
893
+ "dependentGloss": "fox",
894
+ "governor": 0,
895
+ "governorGloss": "ROOT",
896
+ },
897
+ {
898
+ "dep": "det",
899
+ "dependent": 1,
900
+ "dependentGloss": "The",
901
+ "governor": 4,
902
+ "governorGloss": "fox",
903
+ },
904
+ {
905
+ "dep": "amod",
906
+ "dependent": 2,
907
+ "dependentGloss": "quick",
908
+ "governor": 4,
909
+ "governorGloss": "fox",
910
+ },
911
+ {
912
+ "dep": "amod",
913
+ "dependent": 3,
914
+ "dependentGloss": "brown",
915
+ "governor": 4,
916
+ "governorGloss": "fox",
917
+ },
918
+ {
919
+ "dep": "dep",
920
+ "dependent": 5,
921
+ "dependentGloss": "jumps",
922
+ "governor": 4,
923
+ "governorGloss": "fox",
924
+ },
925
+ {
926
+ "dep": "case",
927
+ "dependent": 6,
928
+ "dependentGloss": "over",
929
+ "governor": 9,
930
+ "governorGloss": "dog",
931
+ },
932
+ {
933
+ "dep": "det",
934
+ "dependent": 7,
935
+ "dependentGloss": "the",
936
+ "governor": 9,
937
+ "governorGloss": "dog",
938
+ },
939
+ {
940
+ "dep": "amod",
941
+ "dependent": 8,
942
+ "dependentGloss": "lazy",
943
+ "governor": 9,
944
+ "governorGloss": "dog",
945
+ },
946
+ {
947
+ "dep": "nmod:over",
948
+ "dependent": 9,
949
+ "dependentGloss": "dog",
950
+ "governor": 5,
951
+ "governorGloss": "jumps",
952
+ },
953
+ ],
954
+ "index": 0,
955
+ "parse": "(ROOT\n (NP\n (NP (DT The) (JJ quick) (JJ brown) (NN fox))\n (NP\n (NP (NNS jumps))\n (PP (IN over)\n (NP (DT the) (JJ lazy) (NN dog))))))",
956
+ "tokens": [
957
+ {
958
+ "after": " ",
959
+ "before": "",
960
+ "characterOffsetBegin": 0,
961
+ "characterOffsetEnd": 3,
962
+ "index": 1,
963
+ "lemma": "the",
964
+ "originalText": "The",
965
+ "pos": "DT",
966
+ "word": "The",
967
+ },
968
+ {
969
+ "after": " ",
970
+ "before": " ",
971
+ "characterOffsetBegin": 4,
972
+ "characterOffsetEnd": 9,
973
+ "index": 2,
974
+ "lemma": "quick",
975
+ "originalText": "quick",
976
+ "pos": "JJ",
977
+ "word": "quick",
978
+ },
979
+ {
980
+ "after": " ",
981
+ "before": " ",
982
+ "characterOffsetBegin": 10,
983
+ "characterOffsetEnd": 15,
984
+ "index": 3,
985
+ "lemma": "brown",
986
+ "originalText": "brown",
987
+ "pos": "JJ",
988
+ "word": "brown",
989
+ },
990
+ {
991
+ "after": " ",
992
+ "before": " ",
993
+ "characterOffsetBegin": 16,
994
+ "characterOffsetEnd": 19,
995
+ "index": 4,
996
+ "lemma": "fox",
997
+ "originalText": "fox",
998
+ "pos": "NN",
999
+ "word": "fox",
1000
+ },
1001
+ {
1002
+ "after": " ",
1003
+ "before": " ",
1004
+ "characterOffsetBegin": 20,
1005
+ "characterOffsetEnd": 25,
1006
+ "index": 5,
1007
+ "lemma": "jump",
1008
+ "originalText": "jumps",
1009
+ "pos": "VBZ",
1010
+ "word": "jumps",
1011
+ },
1012
+ {
1013
+ "after": " ",
1014
+ "before": " ",
1015
+ "characterOffsetBegin": 26,
1016
+ "characterOffsetEnd": 30,
1017
+ "index": 6,
1018
+ "lemma": "over",
1019
+ "originalText": "over",
1020
+ "pos": "IN",
1021
+ "word": "over",
1022
+ },
1023
+ {
1024
+ "after": " ",
1025
+ "before": " ",
1026
+ "characterOffsetBegin": 31,
1027
+ "characterOffsetEnd": 34,
1028
+ "index": 7,
1029
+ "lemma": "the",
1030
+ "originalText": "the",
1031
+ "pos": "DT",
1032
+ "word": "the",
1033
+ },
1034
+ {
1035
+ "after": " ",
1036
+ "before": " ",
1037
+ "characterOffsetBegin": 35,
1038
+ "characterOffsetEnd": 39,
1039
+ "index": 8,
1040
+ "lemma": "lazy",
1041
+ "originalText": "lazy",
1042
+ "pos": "JJ",
1043
+ "word": "lazy",
1044
+ },
1045
+ {
1046
+ "after": "",
1047
+ "before": " ",
1048
+ "characterOffsetBegin": 40,
1049
+ "characterOffsetEnd": 43,
1050
+ "index": 9,
1051
+ "lemma": "dog",
1052
+ "originalText": "dog",
1053
+ "pos": "NN",
1054
+ "word": "dog",
1055
+ },
1056
+ ],
1057
+ }
1058
+ ]
1059
+ }
1060
+
1061
+ corenlp_parser.api_call = MagicMock(return_value=api_return_value)
1062
+
1063
+ input_string = "The quick brown fox jumps over the lazy dog".split()
1064
+ expected_output = Tree(
1065
+ "ROOT",
1066
+ [
1067
+ Tree(
1068
+ "NP",
1069
+ [
1070
+ Tree(
1071
+ "NP",
1072
+ [
1073
+ Tree("DT", ["The"]),
1074
+ Tree("JJ", ["quick"]),
1075
+ Tree("JJ", ["brown"]),
1076
+ Tree("NN", ["fox"]),
1077
+ ],
1078
+ ),
1079
+ Tree(
1080
+ "NP",
1081
+ [
1082
+ Tree("NP", [Tree("NNS", ["jumps"])]),
1083
+ Tree(
1084
+ "PP",
1085
+ [
1086
+ Tree("IN", ["over"]),
1087
+ Tree(
1088
+ "NP",
1089
+ [
1090
+ Tree("DT", ["the"]),
1091
+ Tree("JJ", ["lazy"]),
1092
+ Tree("NN", ["dog"]),
1093
+ ],
1094
+ ),
1095
+ ],
1096
+ ),
1097
+ ],
1098
+ ),
1099
+ ],
1100
+ )
1101
+ ],
1102
+ )
1103
+
1104
+ parsed_data = next(corenlp_parser.parse(input_string))
1105
+
1106
+ corenlp_parser.api_call.assert_called_once_with(
1107
+ "The quick brown fox jumps over the lazy dog",
1108
+ properties={"ssplit.eolonly": "true"},
1109
+ )
1110
+ self.assertEqual(expected_output, parsed_data)
1111
+
1112
+ def test_dependency_parser(self):
1113
+ corenlp_parser = corenlp.CoreNLPDependencyParser()
1114
+
1115
+ api_return_value = {
1116
+ "sentences": [
1117
+ {
1118
+ "basicDependencies": [
1119
+ {
1120
+ "dep": "ROOT",
1121
+ "dependent": 5,
1122
+ "dependentGloss": "jumps",
1123
+ "governor": 0,
1124
+ "governorGloss": "ROOT",
1125
+ },
1126
+ {
1127
+ "dep": "det",
1128
+ "dependent": 1,
1129
+ "dependentGloss": "The",
1130
+ "governor": 4,
1131
+ "governorGloss": "fox",
1132
+ },
1133
+ {
1134
+ "dep": "amod",
1135
+ "dependent": 2,
1136
+ "dependentGloss": "quick",
1137
+ "governor": 4,
1138
+ "governorGloss": "fox",
1139
+ },
1140
+ {
1141
+ "dep": "amod",
1142
+ "dependent": 3,
1143
+ "dependentGloss": "brown",
1144
+ "governor": 4,
1145
+ "governorGloss": "fox",
1146
+ },
1147
+ {
1148
+ "dep": "nsubj",
1149
+ "dependent": 4,
1150
+ "dependentGloss": "fox",
1151
+ "governor": 5,
1152
+ "governorGloss": "jumps",
1153
+ },
1154
+ {
1155
+ "dep": "case",
1156
+ "dependent": 6,
1157
+ "dependentGloss": "over",
1158
+ "governor": 9,
1159
+ "governorGloss": "dog",
1160
+ },
1161
+ {
1162
+ "dep": "det",
1163
+ "dependent": 7,
1164
+ "dependentGloss": "the",
1165
+ "governor": 9,
1166
+ "governorGloss": "dog",
1167
+ },
1168
+ {
1169
+ "dep": "amod",
1170
+ "dependent": 8,
1171
+ "dependentGloss": "lazy",
1172
+ "governor": 9,
1173
+ "governorGloss": "dog",
1174
+ },
1175
+ {
1176
+ "dep": "nmod",
1177
+ "dependent": 9,
1178
+ "dependentGloss": "dog",
1179
+ "governor": 5,
1180
+ "governorGloss": "jumps",
1181
+ },
1182
+ ],
1183
+ "enhancedDependencies": [
1184
+ {
1185
+ "dep": "ROOT",
1186
+ "dependent": 5,
1187
+ "dependentGloss": "jumps",
1188
+ "governor": 0,
1189
+ "governorGloss": "ROOT",
1190
+ },
1191
+ {
1192
+ "dep": "det",
1193
+ "dependent": 1,
1194
+ "dependentGloss": "The",
1195
+ "governor": 4,
1196
+ "governorGloss": "fox",
1197
+ },
1198
+ {
1199
+ "dep": "amod",
1200
+ "dependent": 2,
1201
+ "dependentGloss": "quick",
1202
+ "governor": 4,
1203
+ "governorGloss": "fox",
1204
+ },
1205
+ {
1206
+ "dep": "amod",
1207
+ "dependent": 3,
1208
+ "dependentGloss": "brown",
1209
+ "governor": 4,
1210
+ "governorGloss": "fox",
1211
+ },
1212
+ {
1213
+ "dep": "nsubj",
1214
+ "dependent": 4,
1215
+ "dependentGloss": "fox",
1216
+ "governor": 5,
1217
+ "governorGloss": "jumps",
1218
+ },
1219
+ {
1220
+ "dep": "case",
1221
+ "dependent": 6,
1222
+ "dependentGloss": "over",
1223
+ "governor": 9,
1224
+ "governorGloss": "dog",
1225
+ },
1226
+ {
1227
+ "dep": "det",
1228
+ "dependent": 7,
1229
+ "dependentGloss": "the",
1230
+ "governor": 9,
1231
+ "governorGloss": "dog",
1232
+ },
1233
+ {
1234
+ "dep": "amod",
1235
+ "dependent": 8,
1236
+ "dependentGloss": "lazy",
1237
+ "governor": 9,
1238
+ "governorGloss": "dog",
1239
+ },
1240
+ {
1241
+ "dep": "nmod:over",
1242
+ "dependent": 9,
1243
+ "dependentGloss": "dog",
1244
+ "governor": 5,
1245
+ "governorGloss": "jumps",
1246
+ },
1247
+ ],
1248
+ "enhancedPlusPlusDependencies": [
1249
+ {
1250
+ "dep": "ROOT",
1251
+ "dependent": 5,
1252
+ "dependentGloss": "jumps",
1253
+ "governor": 0,
1254
+ "governorGloss": "ROOT",
1255
+ },
1256
+ {
1257
+ "dep": "det",
1258
+ "dependent": 1,
1259
+ "dependentGloss": "The",
1260
+ "governor": 4,
1261
+ "governorGloss": "fox",
1262
+ },
1263
+ {
1264
+ "dep": "amod",
1265
+ "dependent": 2,
1266
+ "dependentGloss": "quick",
1267
+ "governor": 4,
1268
+ "governorGloss": "fox",
1269
+ },
1270
+ {
1271
+ "dep": "amod",
1272
+ "dependent": 3,
1273
+ "dependentGloss": "brown",
1274
+ "governor": 4,
1275
+ "governorGloss": "fox",
1276
+ },
1277
+ {
1278
+ "dep": "nsubj",
1279
+ "dependent": 4,
1280
+ "dependentGloss": "fox",
1281
+ "governor": 5,
1282
+ "governorGloss": "jumps",
1283
+ },
1284
+ {
1285
+ "dep": "case",
1286
+ "dependent": 6,
1287
+ "dependentGloss": "over",
1288
+ "governor": 9,
1289
+ "governorGloss": "dog",
1290
+ },
1291
+ {
1292
+ "dep": "det",
1293
+ "dependent": 7,
1294
+ "dependentGloss": "the",
1295
+ "governor": 9,
1296
+ "governorGloss": "dog",
1297
+ },
1298
+ {
1299
+ "dep": "amod",
1300
+ "dependent": 8,
1301
+ "dependentGloss": "lazy",
1302
+ "governor": 9,
1303
+ "governorGloss": "dog",
1304
+ },
1305
+ {
1306
+ "dep": "nmod:over",
1307
+ "dependent": 9,
1308
+ "dependentGloss": "dog",
1309
+ "governor": 5,
1310
+ "governorGloss": "jumps",
1311
+ },
1312
+ ],
1313
+ "index": 0,
1314
+ "tokens": [
1315
+ {
1316
+ "after": " ",
1317
+ "before": "",
1318
+ "characterOffsetBegin": 0,
1319
+ "characterOffsetEnd": 3,
1320
+ "index": 1,
1321
+ "lemma": "the",
1322
+ "originalText": "The",
1323
+ "pos": "DT",
1324
+ "word": "The",
1325
+ },
1326
+ {
1327
+ "after": " ",
1328
+ "before": " ",
1329
+ "characterOffsetBegin": 4,
1330
+ "characterOffsetEnd": 9,
1331
+ "index": 2,
1332
+ "lemma": "quick",
1333
+ "originalText": "quick",
1334
+ "pos": "JJ",
1335
+ "word": "quick",
1336
+ },
1337
+ {
1338
+ "after": " ",
1339
+ "before": " ",
1340
+ "characterOffsetBegin": 10,
1341
+ "characterOffsetEnd": 15,
1342
+ "index": 3,
1343
+ "lemma": "brown",
1344
+ "originalText": "brown",
1345
+ "pos": "JJ",
1346
+ "word": "brown",
1347
+ },
1348
+ {
1349
+ "after": " ",
1350
+ "before": " ",
1351
+ "characterOffsetBegin": 16,
1352
+ "characterOffsetEnd": 19,
1353
+ "index": 4,
1354
+ "lemma": "fox",
1355
+ "originalText": "fox",
1356
+ "pos": "NN",
1357
+ "word": "fox",
1358
+ },
1359
+ {
1360
+ "after": " ",
1361
+ "before": " ",
1362
+ "characterOffsetBegin": 20,
1363
+ "characterOffsetEnd": 25,
1364
+ "index": 5,
1365
+ "lemma": "jump",
1366
+ "originalText": "jumps",
1367
+ "pos": "VBZ",
1368
+ "word": "jumps",
1369
+ },
1370
+ {
1371
+ "after": " ",
1372
+ "before": " ",
1373
+ "characterOffsetBegin": 26,
1374
+ "characterOffsetEnd": 30,
1375
+ "index": 6,
1376
+ "lemma": "over",
1377
+ "originalText": "over",
1378
+ "pos": "IN",
1379
+ "word": "over",
1380
+ },
1381
+ {
1382
+ "after": " ",
1383
+ "before": " ",
1384
+ "characterOffsetBegin": 31,
1385
+ "characterOffsetEnd": 34,
1386
+ "index": 7,
1387
+ "lemma": "the",
1388
+ "originalText": "the",
1389
+ "pos": "DT",
1390
+ "word": "the",
1391
+ },
1392
+ {
1393
+ "after": " ",
1394
+ "before": " ",
1395
+ "characterOffsetBegin": 35,
1396
+ "characterOffsetEnd": 39,
1397
+ "index": 8,
1398
+ "lemma": "lazy",
1399
+ "originalText": "lazy",
1400
+ "pos": "JJ",
1401
+ "word": "lazy",
1402
+ },
1403
+ {
1404
+ "after": "",
1405
+ "before": " ",
1406
+ "characterOffsetBegin": 40,
1407
+ "characterOffsetEnd": 43,
1408
+ "index": 9,
1409
+ "lemma": "dog",
1410
+ "originalText": "dog",
1411
+ "pos": "NN",
1412
+ "word": "dog",
1413
+ },
1414
+ ],
1415
+ }
1416
+ ]
1417
+ }
1418
+
1419
+ corenlp_parser.api_call = MagicMock(return_value=api_return_value)
1420
+
1421
+ input_string = "The quick brown fox jumps over the lazy dog".split()
1422
+ expected_output = Tree(
1423
+ "jumps",
1424
+ [
1425
+ Tree("fox", ["The", "quick", "brown"]),
1426
+ Tree("dog", ["over", "the", "lazy"]),
1427
+ ],
1428
+ )
1429
+
1430
+ parsed_data = next(corenlp_parser.parse(input_string))
1431
+
1432
+ corenlp_parser.api_call.assert_called_once_with(
1433
+ "The quick brown fox jumps over the lazy dog",
1434
+ properties={"ssplit.eolonly": "true"},
1435
+ )
1436
+ self.assertEqual(expected_output, parsed_data.tree())
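The final assertion is easier to follow knowing that the dependency parser builds a `DependencyGraph` from the returned dependency list, and `tree()` then hangs each dependent under its governor. A minimal standalone sketch of that conversion, using a hand-written four-column CoNLL string (word, tag, head, relation) that mirrors the mocked dependencies above rather than real CoreNLP output:

    from nltk.parse.dependencygraph import DependencyGraph

    # Head indices are 1-based; 0 marks the root, matching the
    # "governor": 0 entry in the mocked JSON above.
    conll = """
    The DT 4 det
    quick JJ 4 amod
    brown JJ 4 amod
    fox NN 5 nsubj
    jumps VBZ 0 ROOT
    over IN 9 case
    the DT 9 det
    lazy JJ 9 amod
    dog NN 5 nmod
    """
    graph = DependencyGraph(conll)
    print(graph.tree())
    # (jumps (fox The quick brown) (dog over the lazy))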
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_corpora.py ADDED
@@ -0,0 +1,274 @@
+ import unittest
+
+ import pytest
+
+ from nltk.corpus import (  # mwa_ppdb
+     cess_cat,
+     cess_esp,
+     conll2007,
+     floresta,
+     indian,
+     ptb,
+     sinica_treebank,
+     udhr,
+ )
+ from nltk.tree import Tree
+
+
+ class TestUdhr(unittest.TestCase):
+     def test_words(self):
+         for name in udhr.fileids():
+             words = list(udhr.words(name))
+             self.assertTrue(words)
+
+     def test_raw_unicode(self):
+         for name in udhr.fileids():
+             txt = udhr.raw(name)
+             assert not isinstance(txt, bytes), name
+
+     def test_polish_encoding(self):
+         text_pl = udhr.raw("Polish-Latin2")[:164]
+         text_ppl = udhr.raw("Polish_Polski-Latin2")[:164]
+         expected = """POWSZECHNA DEKLARACJA PRAW CZŁOWIEKA
+ [Preamble]
+ Trzecia Sesja Ogólnego Zgromadzenia ONZ, obradująca w Paryżu, \
+ uchwaliła 10 grudnia 1948 roku jednomyślnie Powszechną"""
+         assert text_pl == expected, "Polish-Latin2"
+         assert text_ppl == expected, "Polish_Polski-Latin2"
+
+
+ class TestIndian(unittest.TestCase):
+     def test_words(self):
+         words = indian.words()[:3]
+         self.assertEqual(words, ["মহিষের", "সন্তান", ":"])
+
+     def test_tagged_words(self):
+         tagged_words = indian.tagged_words()[:3]
+         self.assertEqual(
+             tagged_words, [("মহিষের", "NN"), ("সন্তান", "NN"), (":", "SYM")]
+         )
+
+
+ class TestCess(unittest.TestCase):
+     def test_catalan(self):
+         words = cess_cat.words()[:15]
+         txt = "El Tribunal_Suprem -Fpa- TS -Fpt- ha confirmat la condemna a quatre anys d' inhabilitació especial"
+         self.assertEqual(words, txt.split())
+         self.assertEqual(cess_cat.tagged_sents()[0][34][0], "càrrecs")
+
+     def test_esp(self):
+         words = cess_esp.words()[:15]
+         txt = "El grupo estatal Electricité_de_France -Fpa- EDF -Fpt- anunció hoy , jueves , la compra del"
+         self.assertEqual(words, txt.split())
+         self.assertEqual(cess_esp.words()[115], "años")
+
+
+ class TestFloresta(unittest.TestCase):
+     def test_words(self):
+         words = floresta.words()[:10]
+         txt = "Um revivalismo refrescante O 7_e_Meio é um ex-libris de a"
+         self.assertEqual(words, txt.split())
+
+
+ class TestSinicaTreebank(unittest.TestCase):
+     def test_sents(self):
+         first_3_sents = sinica_treebank.sents()[:3]
+         self.assertEqual(
+             first_3_sents, [["一"], ["友情"], ["嘉珍", "和", "我", "住在", "同一條", "巷子"]]
+         )
+
+     def test_parsed_sents(self):
+         parsed_sents = sinica_treebank.parsed_sents()[25]
+         self.assertEqual(
+             parsed_sents,
+             Tree(
+                 "S",
+                 [
+                     Tree("NP", [Tree("Nba", ["嘉珍"])]),
+                     Tree("V‧地", [Tree("VA11", ["不停"]), Tree("DE", ["的"])]),
+                     Tree("VA4", ["哭泣"]),
+                 ],
+             ),
+         )
+
+
+ class TestCoNLL2007(unittest.TestCase):
+     # Reading the CoNLL 2007 Dependency Treebanks
+
+     def test_sents(self):
+         sents = conll2007.sents("esp.train")[0]
+         self.assertEqual(
+             sents[:6], ["El", "aumento", "del", "índice", "de", "desempleo"]
+         )
+
+     def test_parsed_sents(self):
+
+         parsed_sents = conll2007.parsed_sents("esp.train")[0]
+
+         self.assertEqual(
+             parsed_sents.tree(),
+             Tree(
+                 "fortaleció",
+                 [
+                     Tree(
+                         "aumento",
+                         [
+                             "El",
+                             Tree(
+                                 "del",
+                                 [
+                                     Tree(
+                                         "índice",
+                                         [
+                                             Tree(
+                                                 "de",
+                                                 [Tree("desempleo", ["estadounidense"])],
+                                             )
+                                         ],
+                                     )
+                                 ],
+                             ),
+                         ],
+                     ),
+                     "hoy",
+                     "considerablemente",
+                     Tree(
+                         "al",
+                         [
+                             Tree(
+                                 "euro",
+                                 [
+                                     Tree(
+                                         "cotizaba",
+                                         [
+                                             ",",
+                                             "que",
+                                             Tree("a", [Tree("15.35", ["las", "GMT"])]),
+                                             "se",
+                                             Tree(
+                                                 "en",
+                                                 [
+                                                     Tree(
+                                                         "mercado",
+                                                         [
+                                                             "el",
+                                                             Tree("de", ["divisas"]),
+                                                             Tree("de", ["Fráncfort"]),
+                                                         ],
+                                                     )
+                                                 ],
+                                             ),
+                                             Tree("a", ["0,9452_dólares"]),
+                                             Tree(
+                                                 "frente_a",
+                                                 [
+                                                     ",",
+                                                     Tree(
+                                                         "0,9349_dólares",
+                                                         [
+                                                             "los",
+                                                             Tree(
+                                                                 "de",
+                                                                 [
+                                                                     Tree(
+                                                                         "mañana",
+                                                                         ["esta"],
+                                                                     )
+                                                                 ],
+                                                             ),
+                                                         ],
+                                                     ),
+                                                 ],
+                                             ),
+                                         ],
+                                     ),
+                                 ],
+                             )
+                         ],
+                     ),
+                     ".",
+                 ],
+             ),
+         )
+
+
+ @pytest.mark.skipif(
+     not ptb.fileids(),
+     reason="A full installation of the Penn Treebank is not available",
+ )
+ class TestPTB(unittest.TestCase):
+     def test_fileids(self):
+         self.assertEqual(
+             ptb.fileids()[:4],
+             [
+                 "BROWN/CF/CF01.MRG",
+                 "BROWN/CF/CF02.MRG",
+                 "BROWN/CF/CF03.MRG",
+                 "BROWN/CF/CF04.MRG",
+             ],
+         )
+
+     def test_words(self):
+         self.assertEqual(
+             ptb.words("WSJ/00/WSJ_0003.MRG")[:7],
+             ["A", "form", "of", "asbestos", "once", "used", "*"],
+         )
+
+     def test_tagged_words(self):
+         self.assertEqual(
+             ptb.tagged_words("WSJ/00/WSJ_0003.MRG")[:3],
+             [("A", "DT"), ("form", "NN"), ("of", "IN")],
+         )
+
+     def test_categories(self):
+         self.assertEqual(
+             ptb.categories(),
+             [
+                 "adventure",
+                 "belles_lettres",
+                 "fiction",
+                 "humor",
+                 "lore",
+                 "mystery",
+                 "news",
+                 "romance",
+                 "science_fiction",
+             ],
+         )
+
+     def test_news_fileids(self):
+         self.assertEqual(
+             ptb.fileids("news")[:3],
+             ["WSJ/00/WSJ_0001.MRG", "WSJ/00/WSJ_0002.MRG", "WSJ/00/WSJ_0003.MRG"],
+         )
+
+     def test_category_words(self):
+         self.assertEqual(
+             ptb.words(categories=["humor", "fiction"])[:6],
+             ["Thirty-three", "Scotty", "did", "not", "go", "back"],
+         )
+
+
+ @pytest.mark.skip("Skipping test for mwa_ppdb.")
+ class TestMWAPPDB(unittest.TestCase):
+     def test_fileids(self):
+         self.assertEqual(
+             mwa_ppdb.fileids(), ["ppdb-1.0-xxxl-lexical.extended.synonyms.uniquepairs"]
+         )
+
+     def test_entries(self):
+         self.assertEqual(
+             mwa_ppdb.entries()[:10],
+             [
+                 ("10/17/01", "17/10/2001"),
+                 ("102,70", "102.70"),
+                 ("13,53", "13.53"),
+                 ("3.2.5.3.2.1", "3.2.5.3.2.1."),
+                 ("53,76", "53.76"),
+                 ("6.9.5", "6.9.5."),
+                 ("7.7.6.3", "7.7.6.3."),
+                 ("76,20", "76.20"),
+                 ("79,85", "79.85"),
+                 ("93,65", "93.65"),
+             ],
+         )
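`TestPTB` above is gated on `ptb.fileids()` being non-empty because the Penn Treebank is licensed and cannot be fetched whole with `nltk.download()`; NLTK ships only an empty skeleton package. One way to satisfy the guard, assuming a licensed copy and the standard `nltk_data` layout (the exact directory placement is an assumption to verify against the NLTK corpus HOWTO):

    import nltk

    # Fetch the empty "ptb" skeleton, then copy the licensed BROWN/ and WSJ/
    # directories (with their .MRG files) into <nltk_data>/corpora/ptb/.
    nltk.download("ptb")

    from nltk.corpus import ptb
    print(ptb.fileids()[:2])  # non-empty once the .MRG files are in place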
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_data.py ADDED
@@ -0,0 +1,15 @@
+ import pytest
+
+ import nltk.data
+
+
+ def test_find_raises_exception():
+     with pytest.raises(LookupError):
+         nltk.data.find("no_such_resource/foo")
+
+
+ def test_find_raises_exception_with_full_resource_name():
+     no_such_thing = "no_such_thing/bar"
+     with pytest.raises(LookupError) as exc:
+         nltk.data.find(no_such_thing)
+     assert no_such_thing in str(exc)
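Both tests pin down `nltk.data.find`'s failure mode: a `LookupError` whose message embeds the missing resource name. Application code commonly leans on that same exception to fetch resources lazily; a small sketch of the pattern (the resource and package names are only illustrative):

    import nltk
    import nltk.data

    def ensure(resource_path: str, package: str) -> None:
        # Download `package` only when `resource_path` is not yet installed.
        try:
            nltk.data.find(resource_path)
        except LookupError:
            nltk.download(package)

    ensure("tokenizers/punkt", "punkt")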
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_disagreement.py ADDED
@@ -0,0 +1,144 @@
+ import unittest
+
+ from nltk.metrics.agreement import AnnotationTask
+
+
+ class TestDisagreement(unittest.TestCase):
+
+     """
+     Class containing unit tests for nltk.metrics.agreement.Disagreement.
+     """
+
+     def test_easy(self):
+         """
+         Simple test, based on
+         https://github.com/foolswood/krippendorffs_alpha/raw/master/krippendorff.pdf.
+         """
+         data = [
+             ("coder1", "dress1", "YES"),
+             ("coder2", "dress1", "NO"),
+             ("coder3", "dress1", "NO"),
+             ("coder1", "dress2", "YES"),
+             ("coder2", "dress2", "NO"),
+             ("coder3", "dress3", "NO"),
+         ]
+         annotation_task = AnnotationTask(data)
+         self.assertAlmostEqual(annotation_task.alpha(), -0.3333333)
+
+     def test_easy2(self):
+         """
+         Same simple test with 1 rating removed.
+         Removal of that rating should not matter: K-Alpha ignores items with
+         only 1 rating.
+         """
+         data = [
+             ("coder1", "dress1", "YES"),
+             ("coder2", "dress1", "NO"),
+             ("coder3", "dress1", "NO"),
+             ("coder1", "dress2", "YES"),
+             ("coder2", "dress2", "NO"),
+         ]
+         annotation_task = AnnotationTask(data)
+         self.assertAlmostEqual(annotation_task.alpha(), -0.3333333)
+
+     def test_advanced(self):
+         """
+         More advanced test, based on
+         http://www.agreestat.com/research_papers/onkrippendorffalpha.pdf
+         """
+         data = [
+             ("A", "1", "1"),
+             ("B", "1", "1"),
+             ("D", "1", "1"),
+             ("A", "2", "2"),
+             ("B", "2", "2"),
+             ("C", "2", "3"),
+             ("D", "2", "2"),
+             ("A", "3", "3"),
+             ("B", "3", "3"),
+             ("C", "3", "3"),
+             ("D", "3", "3"),
+             ("A", "4", "3"),
+             ("B", "4", "3"),
+             ("C", "4", "3"),
+             ("D", "4", "3"),
+             ("A", "5", "2"),
+             ("B", "5", "2"),
+             ("C", "5", "2"),
+             ("D", "5", "2"),
+             ("A", "6", "1"),
+             ("B", "6", "2"),
+             ("C", "6", "3"),
+             ("D", "6", "4"),
+             ("A", "7", "4"),
+             ("B", "7", "4"),
+             ("C", "7", "4"),
+             ("D", "7", "4"),
+             ("A", "8", "1"),
+             ("B", "8", "1"),
+             ("C", "8", "2"),
+             ("D", "8", "1"),
+             ("A", "9", "2"),
+             ("B", "9", "2"),
+             ("C", "9", "2"),
+             ("D", "9", "2"),
+             ("B", "10", "5"),
+             ("C", "10", "5"),
+             ("D", "10", "5"),
+             ("C", "11", "1"),
+             ("D", "11", "1"),
+             ("C", "12", "3"),
+         ]
+         annotation_task = AnnotationTask(data)
+         self.assertAlmostEqual(annotation_task.alpha(), 0.743421052632)
+
+     def test_advanced2(self):
+         """
+         Same more advanced example, but with 1 rating removed.
+         Again, removal of that 1 rating should not matter.
+         """
+         data = [
+             ("A", "1", "1"),
+             ("B", "1", "1"),
+             ("D", "1", "1"),
+             ("A", "2", "2"),
+             ("B", "2", "2"),
+             ("C", "2", "3"),
+             ("D", "2", "2"),
+             ("A", "3", "3"),
+             ("B", "3", "3"),
+             ("C", "3", "3"),
+             ("D", "3", "3"),
+             ("A", "4", "3"),
+             ("B", "4", "3"),
+             ("C", "4", "3"),
+             ("D", "4", "3"),
+             ("A", "5", "2"),
+             ("B", "5", "2"),
+             ("C", "5", "2"),
+             ("D", "5", "2"),
+             ("A", "6", "1"),
+             ("B", "6", "2"),
+             ("C", "6", "3"),
+             ("D", "6", "4"),
+             ("A", "7", "4"),
+             ("B", "7", "4"),
+             ("C", "7", "4"),
+             ("D", "7", "4"),
+             ("A", "8", "1"),
+             ("B", "8", "1"),
+             ("C", "8", "2"),
+             ("D", "8", "1"),
+             ("A", "9", "2"),
+             ("B", "9", "2"),
+             ("C", "9", "2"),
+             ("D", "9", "2"),
+             ("B", "10", "5"),
+             ("C", "10", "5"),
+             ("D", "10", "5"),
+             ("C", "11", "1"),
+             ("D", "11", "1"),
+             ("C", "12", "3"),
+         ]
+         annotation_task = AnnotationTask(data)
+         self.assertAlmostEqual(annotation_task.alpha(), 0.743421052632)
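All four fixtures feed `AnnotationTask` plain (coder, item, label) triples; `alpha()` computes Krippendorff's alpha, and the same object exposes other agreement coefficients. A quick interactive sketch reusing the first fixture (expected values taken from the assertions above):

    from nltk.metrics.agreement import AnnotationTask

    data = [
        ("coder1", "dress1", "YES"),
        ("coder2", "dress1", "NO"),
        ("coder3", "dress1", "NO"),
        ("coder1", "dress2", "YES"),
        ("coder2", "dress2", "NO"),
        ("coder3", "dress3", "NO"),
    ]
    task = AnnotationTask(data)
    print(task.alpha())   # ~ -0.3333, as asserted in test_easy
    print(task.avg_Ao())  # average observed pairwise agreement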
llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_distance.py ADDED
@@ -0,0 +1,129 @@
+ from typing import Tuple
+
+ import pytest
+
+ from nltk.metrics.distance import edit_distance
+
+
+ class TestEditDistance:
+     @pytest.mark.parametrize(
+         "left,right,substitution_cost,expecteds",
+         [
+             # Allowing transpositions reduces the number of edits required.
+             # with transpositions:
+             # e.g. "abc" -T-> "cba" -D-> "ca": 2 steps
+             #
+             # without transpositions:
+             # e.g. "abc" -D-> "ab" -D-> "a" -I-> "ca": 3 steps
+             ("abc", "ca", 1, (2, 3)),
+             ("abc", "ca", 5, (2, 3)),  # Doesn't *require* substitutions
+             # Note: a substitution_cost higher than 2 doesn't make much
+             # sense, as a deletion + insertion is identical, and always
+             # costs 2.
+             #
+             #
+             # Transpositions don't always reduce the number of edits required:
+             # with or without transpositions:
+             # e.g. "wants" -D-> "wats" -D-> "was" -I-> "wasp": 3 steps
+             ("wants", "wasp", 1, (3, 3)),
+             ("wants", "wasp", 5, (3, 3)),  # Doesn't *require* substitutions
+             #
+             #
+             # Ought to have the same results with and without transpositions
+             # with or without transpositions:
+             # e.g. "rain" -S-> "sain" -S-> "shin" -I-> "shine": 3 steps
+             # (but cost 5 if substitution_cost=2)
+             ("rain", "shine", 1, (3, 3)),
+             ("rain", "shine", 2, (5, 5)),  # Does *require* substitutions
+             #
+             #
+             # Several potentially interesting typos
+             # with transpositions:
+             # e.g. "acbdef" -T-> "abcdef": 1 step
+             #
+             # without transpositions:
+             # e.g. "acbdef" -D-> "abdef" -I-> "abcdef": 2 steps
+             ("acbdef", "abcdef", 1, (1, 2)),
+             ("acbdef", "abcdef", 2, (1, 2)),  # Doesn't *require* substitutions
+             #
+             #
+             # with transpositions:
+             # e.g. "lnaguaeg" -T-> "languaeg" -T-> "language": 2 steps
+             #
+             # without transpositions:
+             # e.g. "lnaguaeg" -D-> "laguaeg" -I-> "languaeg" -D-> "languag" -I-> "language": 4 steps
+             ("lnaguaeg", "language", 1, (2, 4)),
+             ("lnaguaeg", "language", 2, (2, 4)),  # Doesn't *require* substitutions
+             #
+             #
+             # with transpositions:
+             # e.g. "lnaugage" -T-> "lanugage" -T-> "language": 2 steps
+             #
+             # without transpositions:
+             # e.g. "lnaugage" -S-> "lnangage" -D-> "langage" -I-> "language": 3 steps
+             # (but one substitution, so a cost of 4 if substitution_cost=2)
+             ("lnaugage", "language", 1, (2, 3)),
+             ("lnaugage", "language", 2, (2, 4)),
+             # Does *require* substitutions if no transpositions
+             #
+             #
+             # with transpositions:
+             # e.g. "lngauage" -T-> "lnaguage" -T-> "language": 2 steps
+             # without transpositions:
+             # e.g. "lngauage" -I-> "langauage" -D-> "language": 2 steps
+             ("lngauage", "language", 1, (2, 2)),
+             ("lngauage", "language", 2, (2, 2)),  # Doesn't *require* substitutions
+             #
+             #
+             # with or without transpositions:
+             # e.g. "wants" -S-> "sants" -S-> "swnts" -S-> "swits" -S-> "swims" -D-> "swim": 5 steps
+             #
+             # with substitution_cost=2 and transpositions:
+             # e.g. "wants" -T-> "santw" -D-> "sntw" -D-> "stw" -D-> "sw"
+             #      -I-> "swi" -I-> "swim": 6 steps
+             #
+             # with substitution_cost=2 and no transpositions:
+             # e.g. "wants" -I-> "swants" -D-> "swant" -D-> "swan" -D-> "swa" -D-> "sw"
+             #      -I-> "swi" -I-> "swim": 7 steps
+             ("wants", "swim", 1, (5, 5)),
+             ("wants", "swim", 2, (6, 7)),
+             #
+             #
+             # with or without transpositions:
+             # e.g. "kitten" -S-> "sitten" -S-> "sittin" -I-> "sitting": 3 steps
+             # (but cost 5 if substitution_cost=2)
+             ("kitten", "sitting", 1, (3, 3)),
+             ("kitten", "sitting", 2, (5, 5)),
+             #
+             # duplicated letter
+             # e.g. "duuplicated" -D-> "duplicated": 1 step
+             ("duplicated", "duuplicated", 1, (1, 1)),
+             ("duplicated", "duuplicated", 2, (1, 1)),
+             ("very duplicated", "very duuplicateed", 2, (2, 2)),
+         ],
+     )
+     def test_with_transpositions(
+         self, left: str, right: str, substitution_cost: int, expecteds: Tuple[int, int]
+     ):
+         """
+         Test `edit_distance` between two strings, given some `substitution_cost`,
+         and whether transpositions are allowed.
+
+         :param str left: First input string to `edit_distance`.
+         :param str right: Second input string to `edit_distance`.
+         :param int substitution_cost: The cost of a substitution action in `edit_distance`.
+         :param Tuple[int, int] expecteds: A tuple of expected outputs, such that `expecteds[0]` is
+             the expected output with `transpositions=True`, and `expecteds[1]` is
+             the expected output with `transpositions=False`.
+         """
+         # Test the input strings in both orderings
+         for s1, s2 in ((left, right), (right, left)):
+             # zip with [True, False] to get the transpositions value
+             for expected, transpositions in zip(expecteds, [True, False]):
+                 predicted = edit_distance(
+                     s1,
+                     s2,
+                     substitution_cost=substitution_cost,
+                     transpositions=transpositions,
+                 )
+                 assert predicted == expected
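The `(with, without)` pairs in the parametrization encode two properties worth stating outright: allowing transpositions can never increase the distance, and `edit_distance` is symmetric in its two arguments, which is exactly why the test runs both orderings. A direct check of one row (values taken from the table above):

    from nltk.metrics.distance import edit_distance

    # From the ("lnaguaeg", "language", 1, (2, 4)) row; substitution_cost defaults to 1.
    assert edit_distance("lnaguaeg", "language", transpositions=True) == 2
    assert edit_distance("lnaguaeg", "language", transpositions=False) == 4
    assert edit_distance("language", "lnaguaeg", transpositions=True) == 2  # symmetric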