diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_aline.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_aline.py
new file mode 100644
index 0000000000000000000000000000000000000000..68cb55f74809ac3cb53e8dfd56be8706a656f0fb
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_aline.py
@@ -0,0 +1,48 @@
+"""
+Test Aline algorithm for aligning phonetic sequences
+"""
+from nltk.metrics import aline
+
+
+def test_aline():
+    result = aline.align("θin", "tenwis")
+    expected = [[("θ", "t"), ("i", "e"), ("n", "n")]]
+
+    assert result == expected
+
+    result = aline.align("jo", "ʒə")
+    expected = [[("j", "ʒ"), ("o", "ə")]]
+
+    assert result == expected
+
+    result = aline.align("pematesiweni", "pematesewen")
+    expected = [
+        [
+            ("p", "p"),
+            ("e", "e"),
+            ("m", "m"),
+            ("a", "a"),
+            ("t", "t"),
+            ("e", "e"),
+            ("s", "s"),
+            ("i", "e"),
+            ("w", "w"),
+            ("e", "e"),
+            ("n", "n"),
+        ]
+    ]
+
+    assert result == expected
+
+    result = aline.align("tuwθ", "dentis")
+    expected = [[("t", "t"), ("u", "i"), ("w", "-"), ("θ", "s")]]
+
+    assert result == expected
+
+
+def test_aline_delta():
+    """
+    Test aline for computing the difference between two segments
+    """
+    assert aline.delta("p", "q") == 20.0
+    assert aline.delta("a", "A") == 0.0
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_brill.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_brill.py
new file mode 100644
index 0000000000000000000000000000000000000000..cea8a854ea27b37bd9cadb4493e4dfc4ddb46cf5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_brill.py
@@ -0,0 +1,34 @@
+"""
+Tests for Brill tagger.
+"""
+
+import unittest
+
+from nltk.corpus import treebank
+from nltk.tag import UnigramTagger, brill, brill_trainer
+from nltk.tbl import demo
+
+
+class TestBrill(unittest.TestCase):
+    def test_pos_template(self):
+        train_sents = treebank.tagged_sents()[:1000]
+        tagger = UnigramTagger(train_sents)
+        trainer = brill_trainer.BrillTaggerTrainer(
+            tagger, [brill.Template(brill.Pos([-1]))]
+        )
+        brill_tagger = trainer.train(train_sents)
+        # Example from https://github.com/nltk/nltk/issues/769
+        result = brill_tagger.tag("This is a foo bar sentence".split())
+        expected = [
+            ("This", "DT"),
+            ("is", "VBZ"),
+            ("a", "DT"),
+            ("foo", None),
+            ("bar", "NN"),
+            ("sentence", None),
+        ]
+        self.assertEqual(result, expected)
+
+    @unittest.skip("Should be tested in __main__ of nltk.tbl.demo")
+    def test_brill_demo(self):
+        demo()
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_cfd_mutation.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_cfd_mutation.py
new file mode 100644
index 0000000000000000000000000000000000000000..8952f1f35fe7f78d96b92cc4c377dbd1d387aaf0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_cfd_mutation.py
@@ -0,0 +1,39 @@
+import unittest
+
+import pytest
+
+from nltk import ConditionalFreqDist, tokenize
+
+
+class TestEmptyCondFreq(unittest.TestCase):
+    def test_tabulate(self):
+        empty = ConditionalFreqDist()
+        self.assertEqual(empty.conditions(), [])
+        with pytest.raises(ValueError):
+            empty.tabulate(conditions="BUG")  # nonexistent keys shouldn't be added
+        self.assertEqual(empty.conditions(), [])
+
+    def test_plot(self):
+        empty = ConditionalFreqDist()
+        self.assertEqual(empty.conditions(), [])
+        empty.plot(conditions=["BUG"])  # nonexistent keys shouldn't be added
+        self.assertEqual(empty.conditions(), [])
+
+    def test_increment(self):
+        # make sure that we can still mutate cfd normally
+        text = "cow cat mouse cat tiger"
+        cfd = ConditionalFreqDist()
+
+        # create cfd with word length as condition
+        for word in tokenize.word_tokenize(text):
+            condition = len(word)
+            cfd[condition][word] += 1
+
+        self.assertEqual(cfd.conditions(), [3, 5])
+
+        # incrementing previously unseen key is still possible
+        cfd[2]["hi"] += 1
+        self.assertCountEqual(cfd.conditions(), [3, 5, 2])  # new condition added
+        self.assertEqual(
+            cfd[2]["hi"], 1
+        )  # key's frequency incremented from 0 (unseen) to 1
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_cfg2chomsky.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_cfg2chomsky.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a9f24d245d5c9dfd4c4d507237651407d2cc444
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_cfg2chomsky.py
@@ -0,0 +1,49 @@
+import unittest
+
+import nltk
+from nltk.grammar import CFG
+
+
+class ChomskyNormalFormForCFGTest(unittest.TestCase):
+    def test_simple(self):
+        grammar = CFG.fromstring(
+            """
+          S -> NP VP
+          PP -> P NP
+          NP -> Det N | NP PP P
+          VP -> V NP | VP PP
+          VP -> Det
+          Det -> 'a' | 'the'
+          N -> 'dog' | 'cat'
+          V -> 'chased' | 'sat'
+          P -> 'on' | 'in'
+        """
+        )
+        self.assertFalse(grammar.is_flexible_chomsky_normal_form())
+        self.assertFalse(grammar.is_chomsky_normal_form())
+        grammar = grammar.chomsky_normal_form(flexible=True)
+        self.assertTrue(grammar.is_flexible_chomsky_normal_form())
+        self.assertFalse(grammar.is_chomsky_normal_form())
+
+        grammar2 = CFG.fromstring(
+            """
+          S -> NP VP
+          NP -> VP N P
+          VP -> P
+          N -> 'dog' | 'cat'
+          P -> 'on' | 'in'
+        """
+        )
+        self.assertFalse(grammar2.is_flexible_chomsky_normal_form())
+        self.assertFalse(grammar2.is_chomsky_normal_form())
+        grammar2 = grammar2.chomsky_normal_form()
+        self.assertTrue(grammar2.is_flexible_chomsky_normal_form())
+        self.assertTrue(grammar2.is_chomsky_normal_form())
+
+    def test_complex(self):
+        grammar = nltk.data.load("grammars/large_grammars/atis.cfg")
+        self.assertFalse(grammar.is_flexible_chomsky_normal_form())
+        self.assertFalse(grammar.is_chomsky_normal_form())
+        grammar = grammar.chomsky_normal_form(flexible=True)
+        self.assertTrue(grammar.is_flexible_chomsky_normal_form())
+        self.assertFalse(grammar.is_chomsky_normal_form())
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_chunk.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_chunk.py
new file mode 100644
index 0000000000000000000000000000000000000000..60b56317f2b5cae224b906f0c71458144a74f6a8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_chunk.py
@@ -0,0 +1,85 @@
+import unittest
+
+from nltk import RegexpParser
+
+
+class TestChunkRule(unittest.TestCase):
+    def test_tag_pattern2re_pattern_quantifier(self):
+        """Test for bug https://github.com/nltk/nltk/issues/1597
+
+        Ensures that curly bracket quantifiers can be used inside a chunk rule.
+        This type of quantifier has been used for the supplementary example
+        in https://www.nltk.org/book/ch07.html#exploring-text-corpora.
+        """
+        sent = [
+            ("The", "AT"),
+            ("September-October", "NP"),
+            ("term", "NN"),
+            ("jury", "NN"),
+            ("had", "HVD"),
+            ("been", "BEN"),
+            ("charged", "VBN"),
+            ("by", "IN"),
+            ("Fulton", "NP-TL"),
+            ("Superior", "JJ-TL"),
+            ("Court", "NN-TL"),
+            ("Judge", "NN-TL"),
+            ("Durwood", "NP"),
+            ("Pye", "NP"),
+            ("to", "TO"),
+            ("investigate", "VB"),
+            ("reports", "NNS"),
+            ("of", "IN"),
+            ("possible", "JJ"),
+            ("``", "``"),
+            ("irregularities", "NNS"),
+            ("''", "''"),
+            ("in", "IN"),
+            ("the", "AT"),
+            ("hard-fought", "JJ"),
+            ("primary", "NN"),
+            ("which", "WDT"),
+            ("was", "BEDZ"),
+            ("won", "VBN"),
+            ("by", "IN"),
+            ("Mayor-nominate", "NN-TL"),
+            ("Ivan", "NP"),
+            ("Allen", "NP"),
+            ("Jr.", "NP"),
+            (".", "."),
+        ]  # source: brown corpus
+        cp = RegexpParser("CHUNK: {<N.*>{4,}}")
+        tree = cp.parse(sent)
+        assert (
+            tree.pformat()
+            == """(S
+  The/AT
+  September-October/NP
+  term/NN
+  jury/NN
+  had/HVD
+  been/BEN
+  charged/VBN
+  by/IN
+  Fulton/NP-TL
+  Superior/JJ-TL
+  (CHUNK Court/NN-TL Judge/NN-TL Durwood/NP Pye/NP)
+  to/TO
+  investigate/VB
+  reports/NNS
+  of/IN
+  possible/JJ
+  ``/``
+  irregularities/NNS
+  ''/''
+  in/IN
+  the/AT
+  hard-fought/JJ
+  primary/NN
+  which/WDT
+  was/BEDZ
+  won/VBN
+  by/IN
+  (CHUNK Mayor-nominate/NN-TL Ivan/NP Allen/NP Jr./NP)
+  ./.)"""
+        )
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_classify.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_classify.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e21a6cf4aa119e6696a9d2ba618ff08220b0fac
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_classify.py
@@ -0,0 +1,49 @@
+"""
+Unit tests for nltk.classify. See also: nltk/test/classify.doctest
+"""
+import pytest
+
+from nltk import classify
+
+TRAIN = [
+    (dict(a=1, b=1, c=1), "y"),
+    (dict(a=1, b=1, c=1), "x"),
+    (dict(a=1, b=1, c=0), "y"),
+    (dict(a=0, b=1, c=1), "x"),
+    (dict(a=0, b=1, c=1), "y"),
+    (dict(a=0, b=0, c=1), "y"),
+    (dict(a=0, b=1, c=0), "x"),
+    (dict(a=0, b=0, c=0), "x"),
+    (dict(a=0, b=1, c=1), "y"),
+]
+
+TEST = [
+    (dict(a=1, b=0, c=1)),  # unseen
+    (dict(a=1, b=0, c=0)),  # unseen
+    (dict(a=0, b=1, c=1)),  # seen 3 times, labels=y,y,x
+    (dict(a=0, b=1, c=0)),  # seen 1 time, label=x
+]
+
+RESULTS = [(0.16, 0.84), (0.46, 0.54), (0.41, 0.59), (0.76, 0.24)]
+
+
+def assert_classifier_correct(algorithm):
+    try:
+        classifier = classify.MaxentClassifier.train(
+            TRAIN, algorithm, trace=0, max_iter=1000
+        )
+    except (LookupError, AttributeError) as e:
+        pytest.skip(str(e))
+
+    for (px, py), featureset in zip(RESULTS, TEST):
+        pdist = classifier.prob_classify(featureset)
+        assert abs(pdist.prob("x") - px) < 1e-2, (pdist.prob("x"), px)
+        assert abs(pdist.prob("y") - py) < 1e-2, (pdist.prob("y"), py)
+
+
+def test_megam():
+    assert_classifier_correct("MEGAM")
+
+
+def test_tadm():
+    assert_classifier_correct("TADM")
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_concordance.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_concordance.py
new file mode 100644
index 0000000000000000000000000000000000000000..02fc5f35a4901598617a71c266ef0448c27694c6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_concordance.py
@@ -0,0 +1,98 @@
+import contextlib
+import sys
+import unittest
+from io import StringIO
+
+from nltk.corpus import gutenberg
+from nltk.text import Text
+
+
+@contextlib.contextmanager
+def stdout_redirect(where):
+    sys.stdout = where
+    try:
+        yield where
+    finally:
+        sys.stdout = sys.__stdout__
+
+
+class TestConcordance(unittest.TestCase):
+    """Text constructed using: https://www.nltk.org/book/ch01.html"""
+
+    @classmethod
+    def setUpClass(cls):
+        cls.corpus = gutenberg.words("melville-moby_dick.txt")
+
+    @classmethod
+    def tearDownClass(cls):
+        pass
+
+    def setUp(self):
+        self.text = Text(TestConcordance.corpus)
+        self.query = "monstrous"
+        self.maxDiff = None
+        self.list_out = [
+            "ong the former , one was of a most monstrous size . ... This came towards us , ",
+            'ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r',
+            "ll over with a heathenish array of monstrous clubs and spears . Some were thick",
+            "d as you gazed , and wondered what monstrous cannibal and savage could ever hav",
+            "that has survived the flood ; most monstrous and most mountainous ! That Himmal",
+            "they might scout at Moby Dick as a monstrous fable , or still worse and more de",
+            "th of Radney .'\" CHAPTER 55 Of the Monstrous Pictures of Whales . I shall ere l",
+            "ing Scenes . In connexion with the monstrous pictures of whales , I am strongly",
+            "ere to enter upon those still more monstrous stories of them which are to be fo",
+            "ght have been rummaged out of this monstrous cabinet there is no telling . But ",
+            "of Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead u",
+        ]
+
+    def tearDown(self):
+        pass
+
+    def test_concordance_list(self):
+        concordance_out = self.text.concordance_list(self.query)
+        self.assertEqual(self.list_out, [c.line for c in concordance_out])
+
+    def test_concordance_width(self):
+        list_out = [
+            "monstrous",
+            "monstrous",
+            "monstrous",
+            "monstrous",
+            "monstrous",
+            "monstrous",
+            "Monstrous",
+            "monstrous",
+            "monstrous",
+            "monstrous",
+            "monstrous",
+        ]
+
+        concordance_out = self.text.concordance_list(self.query, width=0)
+        self.assertEqual(list_out, [c.query for c in concordance_out])
+
+    def test_concordance_lines(self):
+        concordance_out = self.text.concordance_list(self.query, lines=3)
+        self.assertEqual(self.list_out[:3], [c.line for c in concordance_out])
+
+    def test_concordance_print(self):
+        print_out = """Displaying 11 of 11 matches:
+ong the former , one was of a most monstrous size . ... This came towards us ,
+ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r
+ll over with a heathenish array of monstrous clubs and spears . Some were thick
+d as you gazed , and wondered what monstrous cannibal and savage could ever hav
+that has survived the flood ; most monstrous and most mountainous ! That Himmal
+they might scout at Moby Dick as a monstrous fable , or still worse and more de
+th of Radney .'" CHAPTER 55 Of the Monstrous Pictures of Whales . I shall ere l
+ing Scenes . In connexion with the monstrous pictures of whales , I am strongly
+ere to enter upon those still more monstrous stories of them which are to be fo
+ght have been rummaged out of this monstrous cabinet there is no telling . But
+of Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead u
+"""
+
+        with stdout_redirect(StringIO()) as stdout:
+            self.text.concordance(self.query)
+
+        def strip_space(raw_str):
+            return raw_str.replace(" ", "")
+
+        self.assertEqual(strip_space(print_out), strip_space(stdout.getvalue()))
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_corenlp.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_corenlp.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b0024b11470c1bee501c898ff508622e783287f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_corenlp.py
@@ -0,0 +1,1436 @@
+"""
+Mock test for Stanford CoreNLP wrappers.
+"""
+
+from unittest import TestCase
+from unittest.mock import MagicMock
+
+import pytest
+
+from nltk.parse import corenlp
+from nltk.tree import Tree
+
+
+def setup_module(module):
+    global server
+
+    try:
+        server = corenlp.CoreNLPServer(port=9000)
+    except LookupError:
+        pytest.skip("Could not instantiate CoreNLPServer.")
+
+    try:
+        server.start()
+    except corenlp.CoreNLPServerError as e:
+        pytest.skip(
+            "Skipping CoreNLP tests because the server could not be started. "
+            "Make sure that the 9000 port is free. "
+            "{}".format(e.strerror)
+        )
+
+
+def teardown_module(module):
+    server.stop()
+
+
+class TestTokenizerAPI(TestCase):
+    def test_tokenize(self):
+        corenlp_tokenizer = corenlp.CoreNLPParser()
+
+        api_return_value = {
+            "sentences": [
+                {
+                    "index": 0,
+                    "tokens": [
+                        {
+                            "after": " ",
+                            "before": "",
+                            "characterOffsetBegin": 0,
+                            "characterOffsetEnd": 4,
+                            "index": 1,
+                            "originalText": "Good",
+                            "word": "Good",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 5,
+                            "characterOffsetEnd": 12,
+                            "index": 2,
+                            "originalText": "muffins",
+                            "word": "muffins",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 13,
+                            "characterOffsetEnd": 17,
+                            "index": 3,
+                            "originalText": "cost",
+                            "word": "cost",
+                        },
+                        {
+                            "after": "",
+                            "before": " ",
+                            "characterOffsetBegin": 18,
+                            "characterOffsetEnd": 19,
+                            "index": 4,
+                            "originalText": "$",
+                            "word": "$",
+                        },
+                        {
+                            "after": "\n",
+                            "before": "",
+                            "characterOffsetBegin": 19,
+                            "characterOffsetEnd": 23,
+                            "index": 5,
+                            "originalText": "3.88",
+                            "word": "3.88",
+                        },
+                        {
+                            "after": " ",
+                            "before": "\n",
+                            "characterOffsetBegin": 24,
+                            "characterOffsetEnd": 26,
+                            "index": 6,
+                            "originalText": "in",
+                            "word": "in",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 27,
+                            "characterOffsetEnd": 30,
+                            "index": 7,
+                            "originalText": "New",
+                            "word": "New",
+                        },
+                        {
+                            "after": "",
+                            "before": " ",
+                            "characterOffsetBegin": 31,
+                            "characterOffsetEnd": 35,
+                            "index": 8,
+                            "originalText": "York",
+                            "word": "York",
+                        },
+                        {
+                            "after": " ",
+                            "before": "",
+                            "characterOffsetBegin": 35,
+                            "characterOffsetEnd": 36,
+                            "index": 9,
+                            "originalText": ".",
+                            "word": ".",
+                        },
+                    ],
+                },
+                {
+                    "index": 1,
+                    "tokens": [
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 38,
+                            "characterOffsetEnd": 44,
+                            "index": 1,
+                            "originalText": "Please",
+                            "word": "Please",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 45,
+                            "characterOffsetEnd": 48,
+                            "index": 2,
+                            "originalText": "buy",
+                            "word": "buy",
+                        },
+                        {
+                            "after": "\n",
+                            "before": " ",
+                            "characterOffsetBegin": 49,
+                            "characterOffsetEnd": 51,
+                            "index": 3,
+                            "originalText": "me",
+                            "word": "me",
+                        },
+                        {
+                            "after": " ",
+                            "before": "\n",
+                            "characterOffsetBegin": 52,
+                            "characterOffsetEnd": 55,
"index": 4, + "originalText": "two", + "word": "two", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 56, + "characterOffsetEnd": 58, + "index": 5, + "originalText": "of", + "word": "of", + }, + { + "after": "", + "before": " ", + "characterOffsetBegin": 59, + "characterOffsetEnd": 63, + "index": 6, + "originalText": "them", + "word": "them", + }, + { + "after": "\n", + "before": "", + "characterOffsetBegin": 63, + "characterOffsetEnd": 64, + "index": 7, + "originalText": ".", + "word": ".", + }, + ], + }, + { + "index": 2, + "tokens": [ + { + "after": "", + "before": "\n", + "characterOffsetBegin": 65, + "characterOffsetEnd": 71, + "index": 1, + "originalText": "Thanks", + "word": "Thanks", + }, + { + "after": "", + "before": "", + "characterOffsetBegin": 71, + "characterOffsetEnd": 72, + "index": 2, + "originalText": ".", + "word": ".", + }, + ], + }, + ] + } + corenlp_tokenizer.api_call = MagicMock(return_value=api_return_value) + + input_string = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks." + + expected_output = [ + "Good", + "muffins", + "cost", + "$", + "3.88", + "in", + "New", + "York", + ".", + "Please", + "buy", + "me", + "two", + "of", + "them", + ".", + "Thanks", + ".", + ] + + tokenized_output = list(corenlp_tokenizer.tokenize(input_string)) + + corenlp_tokenizer.api_call.assert_called_once_with( + "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks.", + properties={"annotators": "tokenize,ssplit"}, + ) + self.assertEqual(expected_output, tokenized_output) + + +class TestTaggerAPI(TestCase): + def test_pos_tagger(self): + corenlp_tagger = corenlp.CoreNLPParser(tagtype="pos") + + api_return_value = { + "sentences": [ + { + "basicDependencies": [ + { + "dep": "ROOT", + "dependent": 1, + "dependentGloss": "What", + "governor": 0, + "governorGloss": "ROOT", + }, + { + "dep": "cop", + "dependent": 2, + "dependentGloss": "is", + "governor": 1, + "governorGloss": "What", + }, + { + "dep": "det", + "dependent": 3, + "dependentGloss": "the", + "governor": 4, + "governorGloss": "airspeed", + }, + { + "dep": "nsubj", + "dependent": 4, + "dependentGloss": "airspeed", + "governor": 1, + "governorGloss": "What", + }, + { + "dep": "case", + "dependent": 5, + "dependentGloss": "of", + "governor": 8, + "governorGloss": "swallow", + }, + { + "dep": "det", + "dependent": 6, + "dependentGloss": "an", + "governor": 8, + "governorGloss": "swallow", + }, + { + "dep": "compound", + "dependent": 7, + "dependentGloss": "unladen", + "governor": 8, + "governorGloss": "swallow", + }, + { + "dep": "nmod", + "dependent": 8, + "dependentGloss": "swallow", + "governor": 4, + "governorGloss": "airspeed", + }, + { + "dep": "punct", + "dependent": 9, + "dependentGloss": "?", + "governor": 1, + "governorGloss": "What", + }, + ], + "enhancedDependencies": [ + { + "dep": "ROOT", + "dependent": 1, + "dependentGloss": "What", + "governor": 0, + "governorGloss": "ROOT", + }, + { + "dep": "cop", + "dependent": 2, + "dependentGloss": "is", + "governor": 1, + "governorGloss": "What", + }, + { + "dep": "det", + "dependent": 3, + "dependentGloss": "the", + "governor": 4, + "governorGloss": "airspeed", + }, + { + "dep": "nsubj", + "dependent": 4, + "dependentGloss": "airspeed", + "governor": 1, + "governorGloss": "What", + }, + { + "dep": "case", + "dependent": 5, + "dependentGloss": "of", + "governor": 8, + "governorGloss": "swallow", + }, + { + "dep": "det", + "dependent": 6, + "dependentGloss": "an", + "governor": 8, + "governorGloss": "swallow", + 
+                        },
+                        {
+                            "dep": "compound",
+                            "dependent": 7,
+                            "dependentGloss": "unladen",
+                            "governor": 8,
+                            "governorGloss": "swallow",
+                        },
+                        {
+                            "dep": "nmod:of",
+                            "dependent": 8,
+                            "dependentGloss": "swallow",
+                            "governor": 4,
+                            "governorGloss": "airspeed",
+                        },
+                        {
+                            "dep": "punct",
+                            "dependent": 9,
+                            "dependentGloss": "?",
+                            "governor": 1,
+                            "governorGloss": "What",
+                        },
+                    ],
+                    "enhancedPlusPlusDependencies": [
+                        {
+                            "dep": "ROOT",
+                            "dependent": 1,
+                            "dependentGloss": "What",
+                            "governor": 0,
+                            "governorGloss": "ROOT",
+                        },
+                        {
+                            "dep": "cop",
+                            "dependent": 2,
+                            "dependentGloss": "is",
+                            "governor": 1,
+                            "governorGloss": "What",
+                        },
+                        {
+                            "dep": "det",
+                            "dependent": 3,
+                            "dependentGloss": "the",
+                            "governor": 4,
+                            "governorGloss": "airspeed",
+                        },
+                        {
+                            "dep": "nsubj",
+                            "dependent": 4,
+                            "dependentGloss": "airspeed",
+                            "governor": 1,
+                            "governorGloss": "What",
+                        },
+                        {
+                            "dep": "case",
+                            "dependent": 5,
+                            "dependentGloss": "of",
+                            "governor": 8,
+                            "governorGloss": "swallow",
+                        },
+                        {
+                            "dep": "det",
+                            "dependent": 6,
+                            "dependentGloss": "an",
+                            "governor": 8,
+                            "governorGloss": "swallow",
+                        },
+                        {
+                            "dep": "compound",
+                            "dependent": 7,
+                            "dependentGloss": "unladen",
+                            "governor": 8,
+                            "governorGloss": "swallow",
+                        },
+                        {
+                            "dep": "nmod:of",
+                            "dependent": 8,
+                            "dependentGloss": "swallow",
+                            "governor": 4,
+                            "governorGloss": "airspeed",
+                        },
+                        {
+                            "dep": "punct",
+                            "dependent": 9,
+                            "dependentGloss": "?",
+                            "governor": 1,
+                            "governorGloss": "What",
+                        },
+                    ],
+                    "index": 0,
+                    "parse": "(ROOT\n (SBARQ\n (WHNP (WP What))\n (SQ (VBZ is)\n (NP\n (NP (DT the) (NN airspeed))\n (PP (IN of)\n (NP (DT an) (NN unladen) (NN swallow)))))\n (. ?)))",
+                    "tokens": [
+                        {
+                            "after": " ",
+                            "before": "",
+                            "characterOffsetBegin": 0,
+                            "characterOffsetEnd": 4,
+                            "index": 1,
+                            "lemma": "what",
+                            "originalText": "What",
+                            "pos": "WP",
+                            "word": "What",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 5,
+                            "characterOffsetEnd": 7,
+                            "index": 2,
+                            "lemma": "be",
+                            "originalText": "is",
+                            "pos": "VBZ",
+                            "word": "is",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 8,
+                            "characterOffsetEnd": 11,
+                            "index": 3,
+                            "lemma": "the",
+                            "originalText": "the",
+                            "pos": "DT",
+                            "word": "the",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 12,
+                            "characterOffsetEnd": 20,
+                            "index": 4,
+                            "lemma": "airspeed",
+                            "originalText": "airspeed",
+                            "pos": "NN",
+                            "word": "airspeed",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 21,
+                            "characterOffsetEnd": 23,
+                            "index": 5,
+                            "lemma": "of",
+                            "originalText": "of",
+                            "pos": "IN",
+                            "word": "of",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 24,
+                            "characterOffsetEnd": 26,
+                            "index": 6,
+                            "lemma": "a",
+                            "originalText": "an",
+                            "pos": "DT",
+                            "word": "an",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 27,
+                            "characterOffsetEnd": 34,
+                            "index": 7,
+                            "lemma": "unladen",
+                            "originalText": "unladen",
+                            "pos": "JJ",
+                            "word": "unladen",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 35,
+                            "characterOffsetEnd": 42,
+                            "index": 8,
+                            "lemma": "swallow",
+                            "originalText": "swallow",
+                            "pos": "VB",
+                            "word": "swallow",
+                        },
+                        {
+                            "after": "",
+                            "before": " ",
+                            "characterOffsetBegin": 43,
+                            "characterOffsetEnd": 44,
+                            "index": 9,
+                            "lemma": "?",
+                            "originalText": "?",
+                            "pos": ".",
+                            "word": "?",
+                        },
+                    ],
+                }
+            ]
+        }
+        corenlp_tagger.api_call = MagicMock(return_value=api_return_value)
+
?".split() + expected_output = [ + ("What", "WP"), + ("is", "VBZ"), + ("the", "DT"), + ("airspeed", "NN"), + ("of", "IN"), + ("an", "DT"), + ("unladen", "JJ"), + ("swallow", "VB"), + ("?", "."), + ] + tagged_output = corenlp_tagger.tag(input_tokens) + + corenlp_tagger.api_call.assert_called_once_with( + "What is the airspeed of an unladen swallow ?", + properties={ + "ssplit.isOneSentence": "true", + "annotators": "tokenize,ssplit,pos", + }, + ) + self.assertEqual(expected_output, tagged_output) + + def test_ner_tagger(self): + corenlp_tagger = corenlp.CoreNLPParser(tagtype="ner") + + api_return_value = { + "sentences": [ + { + "index": 0, + "tokens": [ + { + "after": " ", + "before": "", + "characterOffsetBegin": 0, + "characterOffsetEnd": 4, + "index": 1, + "lemma": "Rami", + "ner": "PERSON", + "originalText": "Rami", + "pos": "NNP", + "word": "Rami", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 5, + "characterOffsetEnd": 8, + "index": 2, + "lemma": "Eid", + "ner": "PERSON", + "originalText": "Eid", + "pos": "NNP", + "word": "Eid", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 9, + "characterOffsetEnd": 11, + "index": 3, + "lemma": "be", + "ner": "O", + "originalText": "is", + "pos": "VBZ", + "word": "is", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 12, + "characterOffsetEnd": 20, + "index": 4, + "lemma": "study", + "ner": "O", + "originalText": "studying", + "pos": "VBG", + "word": "studying", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 21, + "characterOffsetEnd": 23, + "index": 5, + "lemma": "at", + "ner": "O", + "originalText": "at", + "pos": "IN", + "word": "at", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 24, + "characterOffsetEnd": 29, + "index": 6, + "lemma": "Stony", + "ner": "ORGANIZATION", + "originalText": "Stony", + "pos": "NNP", + "word": "Stony", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 30, + "characterOffsetEnd": 35, + "index": 7, + "lemma": "Brook", + "ner": "ORGANIZATION", + "originalText": "Brook", + "pos": "NNP", + "word": "Brook", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 36, + "characterOffsetEnd": 46, + "index": 8, + "lemma": "University", + "ner": "ORGANIZATION", + "originalText": "University", + "pos": "NNP", + "word": "University", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 47, + "characterOffsetEnd": 49, + "index": 9, + "lemma": "in", + "ner": "O", + "originalText": "in", + "pos": "IN", + "word": "in", + }, + { + "after": "", + "before": " ", + "characterOffsetBegin": 50, + "characterOffsetEnd": 52, + "index": 10, + "lemma": "NY", + "ner": "O", + "originalText": "NY", + "pos": "NNP", + "word": "NY", + }, + ], + } + ] + } + + corenlp_tagger.api_call = MagicMock(return_value=api_return_value) + + input_tokens = "Rami Eid is studying at Stony Brook University in NY".split() + expected_output = [ + ("Rami", "PERSON"), + ("Eid", "PERSON"), + ("is", "O"), + ("studying", "O"), + ("at", "O"), + ("Stony", "ORGANIZATION"), + ("Brook", "ORGANIZATION"), + ("University", "ORGANIZATION"), + ("in", "O"), + ("NY", "O"), + ] + tagged_output = corenlp_tagger.tag(input_tokens) + + corenlp_tagger.api_call.assert_called_once_with( + "Rami Eid is studying at Stony Brook University in NY", + properties={ + "ssplit.isOneSentence": "true", + "annotators": "tokenize,ssplit,ner", + }, + ) + self.assertEqual(expected_output, tagged_output) + + def test_unexpected_tagtype(self): + with 
+        with self.assertRaises(ValueError):
+            corenlp_tagger = corenlp.CoreNLPParser(tagtype="test")
+
+
+class TestParserAPI(TestCase):
+    def test_parse(self):
+        corenlp_parser = corenlp.CoreNLPParser()
+
+        api_return_value = {
+            "sentences": [
+                {
+                    "basicDependencies": [
+                        {
+                            "dep": "ROOT",
+                            "dependent": 4,
+                            "dependentGloss": "fox",
+                            "governor": 0,
+                            "governorGloss": "ROOT",
+                        },
+                        {
+                            "dep": "det",
+                            "dependent": 1,
+                            "dependentGloss": "The",
+                            "governor": 4,
+                            "governorGloss": "fox",
+                        },
+                        {
+                            "dep": "amod",
+                            "dependent": 2,
+                            "dependentGloss": "quick",
+                            "governor": 4,
+                            "governorGloss": "fox",
+                        },
+                        {
+                            "dep": "amod",
+                            "dependent": 3,
+                            "dependentGloss": "brown",
+                            "governor": 4,
+                            "governorGloss": "fox",
+                        },
+                        {
+                            "dep": "dep",
+                            "dependent": 5,
+                            "dependentGloss": "jumps",
+                            "governor": 4,
+                            "governorGloss": "fox",
+                        },
+                        {
+                            "dep": "case",
+                            "dependent": 6,
+                            "dependentGloss": "over",
+                            "governor": 9,
+                            "governorGloss": "dog",
+                        },
+                        {
+                            "dep": "det",
+                            "dependent": 7,
+                            "dependentGloss": "the",
+                            "governor": 9,
+                            "governorGloss": "dog",
+                        },
+                        {
+                            "dep": "amod",
+                            "dependent": 8,
+                            "dependentGloss": "lazy",
+                            "governor": 9,
+                            "governorGloss": "dog",
+                        },
+                        {
+                            "dep": "nmod",
+                            "dependent": 9,
+                            "dependentGloss": "dog",
+                            "governor": 5,
+                            "governorGloss": "jumps",
+                        },
+                    ],
+                    "enhancedDependencies": [
+                        {
+                            "dep": "ROOT",
+                            "dependent": 4,
+                            "dependentGloss": "fox",
+                            "governor": 0,
+                            "governorGloss": "ROOT",
+                        },
+                        {
+                            "dep": "det",
+                            "dependent": 1,
+                            "dependentGloss": "The",
+                            "governor": 4,
+                            "governorGloss": "fox",
+                        },
+                        {
+                            "dep": "amod",
+                            "dependent": 2,
+                            "dependentGloss": "quick",
+                            "governor": 4,
+                            "governorGloss": "fox",
+                        },
+                        {
+                            "dep": "amod",
+                            "dependent": 3,
+                            "dependentGloss": "brown",
+                            "governor": 4,
+                            "governorGloss": "fox",
+                        },
+                        {
+                            "dep": "dep",
+                            "dependent": 5,
+                            "dependentGloss": "jumps",
+                            "governor": 4,
+                            "governorGloss": "fox",
+                        },
+                        {
+                            "dep": "case",
+                            "dependent": 6,
+                            "dependentGloss": "over",
+                            "governor": 9,
+                            "governorGloss": "dog",
+                        },
+                        {
+                            "dep": "det",
+                            "dependent": 7,
+                            "dependentGloss": "the",
+                            "governor": 9,
+                            "governorGloss": "dog",
+                        },
+                        {
+                            "dep": "amod",
+                            "dependent": 8,
+                            "dependentGloss": "lazy",
+                            "governor": 9,
+                            "governorGloss": "dog",
+                        },
+                        {
+                            "dep": "nmod:over",
+                            "dependent": 9,
+                            "dependentGloss": "dog",
+                            "governor": 5,
+                            "governorGloss": "jumps",
+                        },
+                    ],
+                    "enhancedPlusPlusDependencies": [
+                        {
+                            "dep": "ROOT",
+                            "dependent": 4,
+                            "dependentGloss": "fox",
+                            "governor": 0,
+                            "governorGloss": "ROOT",
+                        },
+                        {
+                            "dep": "det",
+                            "dependent": 1,
+                            "dependentGloss": "The",
+                            "governor": 4,
+                            "governorGloss": "fox",
+                        },
+                        {
+                            "dep": "amod",
+                            "dependent": 2,
+                            "dependentGloss": "quick",
+                            "governor": 4,
+                            "governorGloss": "fox",
+                        },
+                        {
+                            "dep": "amod",
+                            "dependent": 3,
+                            "dependentGloss": "brown",
+                            "governor": 4,
+                            "governorGloss": "fox",
+                        },
+                        {
+                            "dep": "dep",
+                            "dependent": 5,
+                            "dependentGloss": "jumps",
+                            "governor": 4,
+                            "governorGloss": "fox",
+                        },
+                        {
+                            "dep": "case",
+                            "dependent": 6,
+                            "dependentGloss": "over",
+                            "governor": 9,
+                            "governorGloss": "dog",
+                        },
+                        {
+                            "dep": "det",
+                            "dependent": 7,
+                            "dependentGloss": "the",
+                            "governor": 9,
+                            "governorGloss": "dog",
+                        },
+                        {
+                            "dep": "amod",
+                            "dependent": 8,
+                            "dependentGloss": "lazy",
+                            "governor": 9,
+                            "governorGloss": "dog",
+                        },
+                        {
+                            "dep": "nmod:over",
+                            "dependent": 9,
+                            "dependentGloss": "dog",
+                            "governor": 5,
+                            "governorGloss": "jumps",
+                        },
+                    ],
+                    "index": 0,
+                    "parse": "(ROOT\n (NP\n (NP (DT The) (JJ quick) (JJ brown) (NN fox))\n (NP\n (NP (NNS jumps))\n (PP (IN over)\n (NP (DT the) (JJ lazy) (NN dog))))))",
+                    "tokens": [
+                        {
+                            "after": " ",
+                            "before": "",
+                            "characterOffsetBegin": 0,
+                            "characterOffsetEnd": 3,
+                            "index": 1,
+                            "lemma": "the",
+                            "originalText": "The",
+                            "pos": "DT",
+                            "word": "The",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 4,
+                            "characterOffsetEnd": 9,
+                            "index": 2,
+                            "lemma": "quick",
+                            "originalText": "quick",
+                            "pos": "JJ",
+                            "word": "quick",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 10,
+                            "characterOffsetEnd": 15,
+                            "index": 3,
+                            "lemma": "brown",
+                            "originalText": "brown",
+                            "pos": "JJ",
+                            "word": "brown",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 16,
+                            "characterOffsetEnd": 19,
+                            "index": 4,
+                            "lemma": "fox",
+                            "originalText": "fox",
+                            "pos": "NN",
+                            "word": "fox",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 20,
+                            "characterOffsetEnd": 25,
+                            "index": 5,
+                            "lemma": "jump",
+                            "originalText": "jumps",
+                            "pos": "VBZ",
+                            "word": "jumps",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 26,
+                            "characterOffsetEnd": 30,
+                            "index": 6,
+                            "lemma": "over",
+                            "originalText": "over",
+                            "pos": "IN",
+                            "word": "over",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 31,
+                            "characterOffsetEnd": 34,
+                            "index": 7,
+                            "lemma": "the",
+                            "originalText": "the",
+                            "pos": "DT",
+                            "word": "the",
+                        },
+                        {
+                            "after": " ",
+                            "before": " ",
+                            "characterOffsetBegin": 35,
+                            "characterOffsetEnd": 39,
+                            "index": 8,
+                            "lemma": "lazy",
+                            "originalText": "lazy",
+                            "pos": "JJ",
+                            "word": "lazy",
+                        },
+                        {
+                            "after": "",
+                            "before": " ",
+                            "characterOffsetBegin": 40,
+                            "characterOffsetEnd": 43,
+                            "index": 9,
+                            "lemma": "dog",
+                            "originalText": "dog",
+                            "pos": "NN",
+                            "word": "dog",
+                        },
+                    ],
+                }
+            ]
+        }
+
+        corenlp_parser.api_call = MagicMock(return_value=api_return_value)
+
+        input_string = "The quick brown fox jumps over the lazy dog".split()
+        expected_output = Tree(
+            "ROOT",
+            [
+                Tree(
+                    "NP",
+                    [
+                        Tree(
+                            "NP",
+                            [
+                                Tree("DT", ["The"]),
+                                Tree("JJ", ["quick"]),
+                                Tree("JJ", ["brown"]),
+                                Tree("NN", ["fox"]),
+                            ],
+                        ),
+                        Tree(
+                            "NP",
+                            [
+                                Tree("NP", [Tree("NNS", ["jumps"])]),
+                                Tree(
+                                    "PP",
+                                    [
+                                        Tree("IN", ["over"]),
+                                        Tree(
+                                            "NP",
+                                            [
+                                                Tree("DT", ["the"]),
+                                                Tree("JJ", ["lazy"]),
+                                                Tree("NN", ["dog"]),
+                                            ],
+                                        ),
+                                    ],
+                                ),
+                            ],
+                        ),
+                    ],
+                )
+            ],
+        )
+
+        parsed_data = next(corenlp_parser.parse(input_string))
+
+        corenlp_parser.api_call.assert_called_once_with(
+            "The quick brown fox jumps over the lazy dog",
+            properties={"ssplit.eolonly": "true"},
+        )
+        self.assertEqual(expected_output, parsed_data)
+
+    def test_dependency_parser(self):
+        corenlp_parser = corenlp.CoreNLPDependencyParser()
+
+        api_return_value = {
+            "sentences": [
+                {
+                    "basicDependencies": [
+                        {
+                            "dep": "ROOT",
+                            "dependent": 5,
+                            "dependentGloss": "jumps",
+                            "governor": 0,
+                            "governorGloss": "ROOT",
+                        },
+                        {
+                            "dep": "det",
+                            "dependent": 1,
+                            "dependentGloss": "The",
+                            "governor": 4,
+                            "governorGloss": "fox",
+                        },
+                        {
+                            "dep": "amod",
+                            "dependent": 2,
+                            "dependentGloss": "quick",
+                            "governor": 4,
+                            "governorGloss": "fox",
+                        },
+                        {
+                            "dep": "amod",
+                            "dependent": 3,
+                            "dependentGloss": "brown",
+                            "governor": 4,
+                            "governorGloss": "fox",
+                        },
+                        {
+                            "dep": "nsubj",
+                            "dependent": 4,
+                            "dependentGloss": "fox",
+                            "governor": 5,
+                            "governorGloss": "jumps",
+                        },
+                        {
+                            "dep": "case",
+                            "dependent": 6,
+                            "dependentGloss": "over",
+                            "governor": 9,
+                            "governorGloss": "dog",
+                        },
+                        {
+                            "dep": "det",
+                            "dependent": 7,
"dependentGloss": "the", + "governor": 9, + "governorGloss": "dog", + }, + { + "dep": "amod", + "dependent": 8, + "dependentGloss": "lazy", + "governor": 9, + "governorGloss": "dog", + }, + { + "dep": "nmod", + "dependent": 9, + "dependentGloss": "dog", + "governor": 5, + "governorGloss": "jumps", + }, + ], + "enhancedDependencies": [ + { + "dep": "ROOT", + "dependent": 5, + "dependentGloss": "jumps", + "governor": 0, + "governorGloss": "ROOT", + }, + { + "dep": "det", + "dependent": 1, + "dependentGloss": "The", + "governor": 4, + "governorGloss": "fox", + }, + { + "dep": "amod", + "dependent": 2, + "dependentGloss": "quick", + "governor": 4, + "governorGloss": "fox", + }, + { + "dep": "amod", + "dependent": 3, + "dependentGloss": "brown", + "governor": 4, + "governorGloss": "fox", + }, + { + "dep": "nsubj", + "dependent": 4, + "dependentGloss": "fox", + "governor": 5, + "governorGloss": "jumps", + }, + { + "dep": "case", + "dependent": 6, + "dependentGloss": "over", + "governor": 9, + "governorGloss": "dog", + }, + { + "dep": "det", + "dependent": 7, + "dependentGloss": "the", + "governor": 9, + "governorGloss": "dog", + }, + { + "dep": "amod", + "dependent": 8, + "dependentGloss": "lazy", + "governor": 9, + "governorGloss": "dog", + }, + { + "dep": "nmod:over", + "dependent": 9, + "dependentGloss": "dog", + "governor": 5, + "governorGloss": "jumps", + }, + ], + "enhancedPlusPlusDependencies": [ + { + "dep": "ROOT", + "dependent": 5, + "dependentGloss": "jumps", + "governor": 0, + "governorGloss": "ROOT", + }, + { + "dep": "det", + "dependent": 1, + "dependentGloss": "The", + "governor": 4, + "governorGloss": "fox", + }, + { + "dep": "amod", + "dependent": 2, + "dependentGloss": "quick", + "governor": 4, + "governorGloss": "fox", + }, + { + "dep": "amod", + "dependent": 3, + "dependentGloss": "brown", + "governor": 4, + "governorGloss": "fox", + }, + { + "dep": "nsubj", + "dependent": 4, + "dependentGloss": "fox", + "governor": 5, + "governorGloss": "jumps", + }, + { + "dep": "case", + "dependent": 6, + "dependentGloss": "over", + "governor": 9, + "governorGloss": "dog", + }, + { + "dep": "det", + "dependent": 7, + "dependentGloss": "the", + "governor": 9, + "governorGloss": "dog", + }, + { + "dep": "amod", + "dependent": 8, + "dependentGloss": "lazy", + "governor": 9, + "governorGloss": "dog", + }, + { + "dep": "nmod:over", + "dependent": 9, + "dependentGloss": "dog", + "governor": 5, + "governorGloss": "jumps", + }, + ], + "index": 0, + "tokens": [ + { + "after": " ", + "before": "", + "characterOffsetBegin": 0, + "characterOffsetEnd": 3, + "index": 1, + "lemma": "the", + "originalText": "The", + "pos": "DT", + "word": "The", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 4, + "characterOffsetEnd": 9, + "index": 2, + "lemma": "quick", + "originalText": "quick", + "pos": "JJ", + "word": "quick", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 10, + "characterOffsetEnd": 15, + "index": 3, + "lemma": "brown", + "originalText": "brown", + "pos": "JJ", + "word": "brown", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 16, + "characterOffsetEnd": 19, + "index": 4, + "lemma": "fox", + "originalText": "fox", + "pos": "NN", + "word": "fox", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 20, + "characterOffsetEnd": 25, + "index": 5, + "lemma": "jump", + "originalText": "jumps", + "pos": "VBZ", + "word": "jumps", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 26, + "characterOffsetEnd": 30, + 
"index": 6, + "lemma": "over", + "originalText": "over", + "pos": "IN", + "word": "over", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 31, + "characterOffsetEnd": 34, + "index": 7, + "lemma": "the", + "originalText": "the", + "pos": "DT", + "word": "the", + }, + { + "after": " ", + "before": " ", + "characterOffsetBegin": 35, + "characterOffsetEnd": 39, + "index": 8, + "lemma": "lazy", + "originalText": "lazy", + "pos": "JJ", + "word": "lazy", + }, + { + "after": "", + "before": " ", + "characterOffsetBegin": 40, + "characterOffsetEnd": 43, + "index": 9, + "lemma": "dog", + "originalText": "dog", + "pos": "NN", + "word": "dog", + }, + ], + } + ] + } + + corenlp_parser.api_call = MagicMock(return_value=api_return_value) + + input_string = "The quick brown fox jumps over the lazy dog".split() + expected_output = Tree( + "jumps", + [ + Tree("fox", ["The", "quick", "brown"]), + Tree("dog", ["over", "the", "lazy"]), + ], + ) + + parsed_data = next(corenlp_parser.parse(input_string)) + + corenlp_parser.api_call.assert_called_once_with( + "The quick brown fox jumps over the lazy dog", + properties={"ssplit.eolonly": "true"}, + ) + self.assertEqual(expected_output, parsed_data.tree()) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_corpora.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_corpora.py new file mode 100644 index 0000000000000000000000000000000000000000..888dd20b5af2f798d966d0a138d4c453675ae0f6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_corpora.py @@ -0,0 +1,274 @@ +import unittest + +import pytest + +from nltk.corpus import ( # mwa_ppdb + cess_cat, + cess_esp, + conll2007, + floresta, + indian, + ptb, + sinica_treebank, + udhr, +) +from nltk.tree import Tree + + +class TestUdhr(unittest.TestCase): + def test_words(self): + for name in udhr.fileids(): + words = list(udhr.words(name)) + self.assertTrue(words) + + def test_raw_unicode(self): + for name in udhr.fileids(): + txt = udhr.raw(name) + assert not isinstance(txt, bytes), name + + def test_polish_encoding(self): + text_pl = udhr.raw("Polish-Latin2")[:164] + text_ppl = udhr.raw("Polish_Polski-Latin2")[:164] + expected = """POWSZECHNA DEKLARACJA PRAW CZŁOWIEKA +[Preamble] +Trzecia Sesja Ogólnego Zgromadzenia ONZ, obradująca w Paryżu, \ +uchwaliła 10 grudnia 1948 roku jednomyślnie Powszechną""" + assert text_pl == expected, "Polish-Latin2" + assert text_ppl == expected, "Polish_Polski-Latin2" + + +class TestIndian(unittest.TestCase): + def test_words(self): + words = indian.words()[:3] + self.assertEqual(words, ["মহিষের", "সন্তান", ":"]) + + def test_tagged_words(self): + tagged_words = indian.tagged_words()[:3] + self.assertEqual( + tagged_words, [("মহিষের", "NN"), ("সন্তান", "NN"), (":", "SYM")] + ) + + +class TestCess(unittest.TestCase): + def test_catalan(self): + words = cess_cat.words()[:15] + txt = "El Tribunal_Suprem -Fpa- TS -Fpt- ha confirmat la condemna a quatre anys d' inhabilitació especial" + self.assertEqual(words, txt.split()) + self.assertEqual(cess_cat.tagged_sents()[0][34][0], "càrrecs") + + def test_esp(self): + words = cess_esp.words()[:15] + txt = "El grupo estatal Electricité_de_France -Fpa- EDF -Fpt- anunció hoy , jueves , la compra del" + self.assertEqual(words, txt.split()) + self.assertEqual(cess_esp.words()[115], "años") + + +class TestFloresta(unittest.TestCase): + def test_words(self): + words = floresta.words()[:10] + txt = "Um revivalismo refrescante O 7_e_Meio é um ex-libris de a" + 
self.assertEqual(words, txt.split()) + + +class TestSinicaTreebank(unittest.TestCase): + def test_sents(self): + first_3_sents = sinica_treebank.sents()[:3] + self.assertEqual( + first_3_sents, [["一"], ["友情"], ["嘉珍", "和", "我", "住在", "同一條", "巷子"]] + ) + + def test_parsed_sents(self): + parsed_sents = sinica_treebank.parsed_sents()[25] + self.assertEqual( + parsed_sents, + Tree( + "S", + [ + Tree("NP", [Tree("Nba", ["嘉珍"])]), + Tree("V‧地", [Tree("VA11", ["不停"]), Tree("DE", ["的"])]), + Tree("VA4", ["哭泣"]), + ], + ), + ) + + +class TestCoNLL2007(unittest.TestCase): + # Reading the CoNLL 2007 Dependency Treebanks + + def test_sents(self): + sents = conll2007.sents("esp.train")[0] + self.assertEqual( + sents[:6], ["El", "aumento", "del", "índice", "de", "desempleo"] + ) + + def test_parsed_sents(self): + + parsed_sents = conll2007.parsed_sents("esp.train")[0] + + self.assertEqual( + parsed_sents.tree(), + Tree( + "fortaleció", + [ + Tree( + "aumento", + [ + "El", + Tree( + "del", + [ + Tree( + "índice", + [ + Tree( + "de", + [Tree("desempleo", ["estadounidense"])], + ) + ], + ) + ], + ), + ], + ), + "hoy", + "considerablemente", + Tree( + "al", + [ + Tree( + "euro", + [ + Tree( + "cotizaba", + [ + ",", + "que", + Tree("a", [Tree("15.35", ["las", "GMT"])]), + "se", + Tree( + "en", + [ + Tree( + "mercado", + [ + "el", + Tree("de", ["divisas"]), + Tree("de", ["Fráncfort"]), + ], + ) + ], + ), + Tree("a", ["0,9452_dólares"]), + Tree( + "frente_a", + [ + ",", + Tree( + "0,9349_dólares", + [ + "los", + Tree( + "de", + [ + Tree( + "mañana", + ["esta"], + ) + ], + ), + ], + ), + ], + ), + ], + ) + ], + ) + ], + ), + ".", + ], + ), + ) + + +@pytest.mark.skipif( + not ptb.fileids(), + reason="A full installation of the Penn Treebank is not available", +) +class TestPTB(unittest.TestCase): + def test_fileids(self): + self.assertEqual( + ptb.fileids()[:4], + [ + "BROWN/CF/CF01.MRG", + "BROWN/CF/CF02.MRG", + "BROWN/CF/CF03.MRG", + "BROWN/CF/CF04.MRG", + ], + ) + + def test_words(self): + self.assertEqual( + ptb.words("WSJ/00/WSJ_0003.MRG")[:7], + ["A", "form", "of", "asbestos", "once", "used", "*"], + ) + + def test_tagged_words(self): + self.assertEqual( + ptb.tagged_words("WSJ/00/WSJ_0003.MRG")[:3], + [("A", "DT"), ("form", "NN"), ("of", "IN")], + ) + + def test_categories(self): + self.assertEqual( + ptb.categories(), + [ + "adventure", + "belles_lettres", + "fiction", + "humor", + "lore", + "mystery", + "news", + "romance", + "science_fiction", + ], + ) + + def test_news_fileids(self): + self.assertEqual( + ptb.fileids("news")[:3], + ["WSJ/00/WSJ_0001.MRG", "WSJ/00/WSJ_0002.MRG", "WSJ/00/WSJ_0003.MRG"], + ) + + def test_category_words(self): + self.assertEqual( + ptb.words(categories=["humor", "fiction"])[:6], + ["Thirty-three", "Scotty", "did", "not", "go", "back"], + ) + + +@pytest.mark.skip("Skipping test for mwa_ppdb.") +class TestMWAPPDB(unittest.TestCase): + def test_fileids(self): + self.assertEqual( + mwa_ppdb.fileids(), ["ppdb-1.0-xxxl-lexical.extended.synonyms.uniquepairs"] + ) + + def test_entries(self): + self.assertEqual( + mwa_ppdb.entries()[:10], + [ + ("10/17/01", "17/10/2001"), + ("102,70", "102.70"), + ("13,53", "13.53"), + ("3.2.5.3.2.1", "3.2.5.3.2.1."), + ("53,76", "53.76"), + ("6.9.5", "6.9.5."), + ("7.7.6.3", "7.7.6.3."), + ("76,20", "76.20"), + ("79,85", "79.85"), + ("93,65", "93.65"), + ], + ) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_data.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_data.py new file mode 100644 index 
0000000000000000000000000000000000000000..b05eea84bfaaca4e439f319057d56821764f75c7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_data.py @@ -0,0 +1,15 @@ +import pytest + +import nltk.data + + +def test_find_raises_exception(): + with pytest.raises(LookupError): + nltk.data.find("no_such_resource/foo") + + +def test_find_raises_exception_with_full_resource_name(): + no_such_thing = "no_such_thing/bar" + with pytest.raises(LookupError) as exc: + nltk.data.find(no_such_thing) + assert no_such_thing in str(exc) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_disagreement.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_disagreement.py new file mode 100644 index 0000000000000000000000000000000000000000..1f29add9058e25b2a2736ce513734655c85d4abf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_disagreement.py @@ -0,0 +1,144 @@ +import unittest + +from nltk.metrics.agreement import AnnotationTask + + +class TestDisagreement(unittest.TestCase): + + """ + Class containing unit tests for nltk.metrics.agreement.Disagreement. + """ + + def test_easy(self): + """ + Simple test, based on + https://github.com/foolswood/krippendorffs_alpha/raw/master/krippendorff.pdf. + """ + data = [ + ("coder1", "dress1", "YES"), + ("coder2", "dress1", "NO"), + ("coder3", "dress1", "NO"), + ("coder1", "dress2", "YES"), + ("coder2", "dress2", "NO"), + ("coder3", "dress3", "NO"), + ] + annotation_task = AnnotationTask(data) + self.assertAlmostEqual(annotation_task.alpha(), -0.3333333) + + def test_easy2(self): + """ + Same simple test with 1 rating removed. + Removal of that rating should not matter: Krippendorff's alpha + ignores items with only 1 rating. + """ + data = [ + ("coder1", "dress1", "YES"), + ("coder2", "dress1", "NO"), + ("coder3", "dress1", "NO"), + ("coder1", "dress2", "YES"), + ("coder2", "dress2", "NO"), + ] + annotation_task = AnnotationTask(data) + self.assertAlmostEqual(annotation_task.alpha(), -0.3333333) + + def test_advanced(self): + """ + More advanced test, based on + http://www.agreestat.com/research_papers/onkrippendorffalpha.pdf + """ + data = [ + ("A", "1", "1"), + ("B", "1", "1"), + ("D", "1", "1"), + ("A", "2", "2"), + ("B", "2", "2"), + ("C", "2", "3"), + ("D", "2", "2"), + ("A", "3", "3"), + ("B", "3", "3"), + ("C", "3", "3"), + ("D", "3", "3"), + ("A", "4", "3"), + ("B", "4", "3"), + ("C", "4", "3"), + ("D", "4", "3"), + ("A", "5", "2"), + ("B", "5", "2"), + ("C", "5", "2"), + ("D", "5", "2"), + ("A", "6", "1"), + ("B", "6", "2"), + ("C", "6", "3"), + ("D", "6", "4"), + ("A", "7", "4"), + ("B", "7", "4"), + ("C", "7", "4"), + ("D", "7", "4"), + ("A", "8", "1"), + ("B", "8", "1"), + ("C", "8", "2"), + ("D", "8", "1"), + ("A", "9", "2"), + ("B", "9", "2"), + ("C", "9", "2"), + ("D", "9", "2"), + ("B", "10", "5"), + ("C", "10", "5"), + ("D", "10", "5"), + ("C", "11", "1"), + ("D", "11", "1"), + ("C", "12", "3"), + ] + annotation_task = AnnotationTask(data) + self.assertAlmostEqual(annotation_task.alpha(), 0.743421052632) + + def test_advanced2(self): + """ + Same more advanced example, but with 1 rating removed. + Again, removal of that 1 rating should not matter.
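+ (Here it is the single rating on item "12" that is removed.)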
+ """ + data = [ + ("A", "1", "1"), + ("B", "1", "1"), + ("D", "1", "1"), + ("A", "2", "2"), + ("B", "2", "2"), + ("C", "2", "3"), + ("D", "2", "2"), + ("A", "3", "3"), + ("B", "3", "3"), + ("C", "3", "3"), + ("D", "3", "3"), + ("A", "4", "3"), + ("B", "4", "3"), + ("C", "4", "3"), + ("D", "4", "3"), + ("A", "5", "2"), + ("B", "5", "2"), + ("C", "5", "2"), + ("D", "5", "2"), + ("A", "6", "1"), + ("B", "6", "2"), + ("C", "6", "3"), + ("D", "6", "4"), + ("A", "7", "4"), + ("B", "7", "4"), + ("C", "7", "4"), + ("D", "7", "4"), + ("A", "8", "1"), + ("B", "8", "1"), + ("C", "8", "2"), + ("D", "8", "1"), + ("A", "9", "2"), + ("B", "9", "2"), + ("C", "9", "2"), + ("D", "9", "2"), + ("B", "10", "5"), + ("C", "10", "5"), + ("D", "10", "5"), + ("C", "11", "1"), + ("D", "11", "1"), + ("C", "12", "3"), + ] + annotation_task = AnnotationTask(data) + self.assertAlmostEqual(annotation_task.alpha(), 0.743421052632) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_distance.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_distance.py new file mode 100644 index 0000000000000000000000000000000000000000..71e0bf06a6a5d3e75ceefa06670542303803d3eb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_distance.py @@ -0,0 +1,129 @@ +from typing import Tuple + +import pytest + +from nltk.metrics.distance import edit_distance + + +class TestEditDistance: + @pytest.mark.parametrize( + "left,right,substitution_cost,expecteds", + [ + # Allowing transpositions reduces the number of edits required. + # with transpositions: + # e.g. "abc" -T-> "cba" -D-> "ca": 2 steps + # + # without transpositions: + # e.g. "abc" -D-> "ab" -D-> "a" -I-> "ca": 3 steps + ("abc", "ca", 1, (2, 3)), + ("abc", "ca", 5, (2, 3)), # Doesn't *require* substitutions + # Note, a substition_cost of higher than 2 doesn't make much + # sense, as a deletion + insertion is identical, and always + # costs 2. + # + # + # Transpositions don't always reduce the number of edits required: + # with or without transpositions: + # e.g. "wants" -D-> "wats" -D-> "was" -I-> "wasp": 3 steps + ("wants", "wasp", 1, (3, 3)), + ("wants", "wasp", 5, (3, 3)), # Doesn't *require* substitutions + # + # + # Ought to have the same results with and without transpositions + # with or without transpositions: + # e.g. "rain" -S-> "sain" -S-> "shin" -I-> "shine": 3 steps + # (but cost 5 if substitution_cost=2) + ("rain", "shine", 1, (3, 3)), + ("rain", "shine", 2, (5, 5)), # Does *require* substitutions + # + # + # Several potentially interesting typos + # with transpositions: + # e.g. "acbdef" -T-> "abcdef": 1 step + # + # without transpositions: + # e.g. "acbdef" -D-> "abdef" -I-> "abcdef": 2 steps + ("acbdef", "abcdef", 1, (1, 2)), + ("acbdef", "abcdef", 2, (1, 2)), # Doesn't *require* substitutions + # + # + # with transpositions: + # e.g. "lnaguaeg" -T-> "languaeg" -T-> "language": 2 steps + # + # without transpositions: + # e.g. "lnaguaeg" -D-> "laguaeg" -I-> "languaeg" -D-> "languag" -I-> "language": 4 steps + ("lnaguaeg", "language", 1, (2, 4)), + ("lnaguaeg", "language", 2, (2, 4)), # Doesn't *require* substitutions + # + # + # with transpositions: + # e.g. "lnaugage" -T-> "lanugage" -T-> "language": 2 steps + # + # without transpositions: + # e.g. 
"lnaugage" -S-> "lnangage" -D-> "langage" -I-> "language": 3 steps + # (but one substitution, so a cost of 4 if substition_cost = 2) + ("lnaugage", "language", 1, (2, 3)), + ("lnaugage", "language", 2, (2, 4)), + # Does *require* substitutions if no transpositions + # + # + # with transpositions: + # e.g. "lngauage" -T-> "lnaguage" -T-> "language": 2 steps + # without transpositions: + # e.g. "lngauage" -I-> "lanaguage" -D-> "language": 2 steps + ("lngauage", "language", 1, (2, 2)), + ("lngauage", "language", 2, (2, 2)), # Doesn't *require* substitutions + # + # + # with or without transpositions: + # e.g. "wants" -S-> "sants" -S-> "swnts" -S-> "swits" -S-> "swims" -D-> "swim": 5 steps + # + # with substitution_cost=2 and transpositions: + # e.g. "wants" -T-> "santw" -D-> "sntw" -D-> "stw" -D-> "sw" + # -I-> "swi" -I-> "swim": 6 steps + # + # with substitution_cost=2 and no transpositions: + # e.g. "wants" -I-> "swants" -D-> "swant" -D-> "swan" -D-> "swa" -D-> "sw" + # -I-> "swi" -I-> "swim": 7 steps + ("wants", "swim", 1, (5, 5)), + ("wants", "swim", 2, (6, 7)), + # + # + # with or without transpositions: + # e.g. "kitten" -S-> "sitten" -s-> "sittin" -I-> "sitting": 3 steps + # (but cost 5 if substitution_cost=2) + ("kitten", "sitting", 1, (3, 3)), + ("kitten", "sitting", 2, (5, 5)), + # + # duplicated letter + # e.g. "duplicated" -D-> "duplicated" + ("duplicated", "duuplicated", 1, (1, 1)), + ("duplicated", "duuplicated", 2, (1, 1)), + ("very duplicated", "very duuplicateed", 2, (2, 2)), + ], + ) + def test_with_transpositions( + self, left: str, right: str, substitution_cost: int, expecteds: Tuple[int, int] + ): + """ + Test `edit_distance` between two strings, given some `substitution_cost`, + and whether transpositions are allowed. + + :param str left: First input string to `edit_distance`. + :param str right: Second input string to `edit_distance`. + :param int substitution_cost: The cost of a substitution action in `edit_distance`. + :param Tuple[int, int] expecteds: A tuple of expected outputs, such that `expecteds[0]` is + the expected output with `transpositions=True`, and `expecteds[1]` is + the expected output with `transpositions=False`. 
+ """ + # Test the input strings in both orderings + for s1, s2 in ((left, right), (right, left)): + # zip with [True, False] to get the transpositions value + for expected, transpositions in zip(expecteds, [True, False]): + predicted = edit_distance( + s1, + s2, + substitution_cost=substitution_cost, + transpositions=transpositions, + ) + assert predicted == expected diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_downloader.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_downloader.py new file mode 100644 index 0000000000000000000000000000000000000000..408372259142592003a3689cb35a0430e0bec190 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_downloader.py @@ -0,0 +1,19 @@ +from nltk import download + + +def test_downloader_using_existing_parent_download_dir(tmp_path): + """Test that download works properly when the parent folder of the download_dir exists""" + + download_dir = str(tmp_path.joinpath("another_dir")) + download_status = download("mwa_ppdb", download_dir) + assert download_status is True + + +def test_downloader_using_non_existing_parent_download_dir(tmp_path): + """Test that download works properly when the parent folder of the download_dir does not exist""" + + download_dir = str( + tmp_path.joinpath("non-existing-parent-folder", "another-non-existing-folder") + ) + download_status = download("mwa_ppdb", download_dir) + assert download_status is True diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_hmm.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_hmm.py new file mode 100644 index 0000000000000000000000000000000000000000..2ce5213230ae4c58ba58ff94872b7240f5884d79 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_hmm.py @@ -0,0 +1,82 @@ +import pytest + +from nltk.tag import hmm + + +def _wikipedia_example_hmm(): + # Example from wikipedia + # (https://en.wikipedia.org/wiki/Forward%E2%80%93backward_algorithm) + + states = ["rain", "no rain"] + symbols = ["umbrella", "no umbrella"] + + A = [[0.7, 0.3], [0.3, 0.7]] # transition probabilities + B = [[0.9, 0.1], [0.2, 0.8]] # emission probabilities + pi = [0.5, 0.5] # initial probabilities + + seq = ["umbrella", "umbrella", "no umbrella", "umbrella", "umbrella"] + seq = list(zip(seq, [None] * len(seq))) + + model = hmm._create_hmm_tagger(states, symbols, A, B, pi) + return model, states, symbols, seq + + +def test_forward_probability(): + from numpy.testing import assert_array_almost_equal + + # example from p. 
385, Huang et al + model, states, symbols = hmm._market_hmm_example() + seq = [("up", None), ("up", None)] + expected = [[0.35, 0.02, 0.09], [0.1792, 0.0085, 0.0357]] + + fp = 2 ** model._forward_probability(seq) + + assert_array_almost_equal(fp, expected) + + +def test_forward_probability2(): + from numpy.testing import assert_array_almost_equal + + model, states, symbols, seq = _wikipedia_example_hmm() + fp = 2 ** model._forward_probability(seq) + + # examples in wikipedia are normalized + fp = (fp.T / fp.sum(axis=1)).T + + wikipedia_results = [ + [0.8182, 0.1818], + [0.8834, 0.1166], + [0.1907, 0.8093], + [0.7308, 0.2692], + [0.8673, 0.1327], + ] + + assert_array_almost_equal(wikipedia_results, fp, 4) + + +def test_backward_probability(): + from numpy.testing import assert_array_almost_equal + + model, states, symbols, seq = _wikipedia_example_hmm() + + bp = 2 ** model._backward_probability(seq) + # examples in wikipedia are normalized + + bp = (bp.T / bp.sum(axis=1)).T + + wikipedia_results = [ + # Forward-backward algorithm doesn't need b0_5, + # so .backward_probability doesn't compute it. + # [0.6469, 0.3531], + [0.5923, 0.4077], + [0.3763, 0.6237], + [0.6533, 0.3467], + [0.6273, 0.3727], + [0.5, 0.5], + ] + + assert_array_almost_equal(wikipedia_results, bp, 4) + + +def setup_module(module): + pytest.importorskip("numpy") diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_metrics.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..ab99d31d6a3255a388747487c969ece568324f52 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_metrics.py @@ -0,0 +1,66 @@ +import unittest + +from nltk.metrics import ( + BigramAssocMeasures, + QuadgramAssocMeasures, + TrigramAssocMeasures, +) + +## Test the likelihood ratio metric + +_DELTA = 1e-8 + + +class TestLikelihoodRatio(unittest.TestCase): + def test_lr_bigram(self): + self.assertAlmostEqual( + BigramAssocMeasures.likelihood_ratio(2, (4, 4), 20), + 2.4142743368419755, + delta=_DELTA, + ) + self.assertAlmostEqual( + BigramAssocMeasures.likelihood_ratio(1, (1, 1), 1), 0.0, delta=_DELTA + ) + self.assertRaises( + ValueError, + BigramAssocMeasures.likelihood_ratio, + *(0, (2, 2), 2), + ) + + def test_lr_trigram(self): + self.assertAlmostEqual( + TrigramAssocMeasures.likelihood_ratio(1, (1, 1, 1), (1, 1, 1), 2), + 5.545177444479562, + delta=_DELTA, + ) + self.assertAlmostEqual( + TrigramAssocMeasures.likelihood_ratio(1, (1, 1, 1), (1, 1, 1), 1), + 0.0, + delta=_DELTA, + ) + self.assertRaises( + ValueError, + TrigramAssocMeasures.likelihood_ratio, + *(1, (1, 1, 2), (1, 1, 2), 2), + ) + + def test_lr_quadgram(self): + self.assertAlmostEqual( + QuadgramAssocMeasures.likelihood_ratio( + 1, (1, 1, 1, 1), (1, 1, 1, 1, 1, 1), (1, 1, 1, 1), 2 + ), + 8.317766166719343, + delta=_DELTA, + ) + self.assertAlmostEqual( + QuadgramAssocMeasures.likelihood_ratio( + 1, (1, 1, 1, 1), (1, 1, 1, 1, 1, 1), (1, 1, 1, 1), 1 + ), + 0.0, + delta=_DELTA, + ) + self.assertRaises( + ValueError, + QuadgramAssocMeasures.likelihood_ratio, + *(1, (1, 1, 1, 1), (1, 1, 1, 1, 1, 2), (1, 1, 1, 1), 1), + ) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_naivebayes.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_naivebayes.py new file mode 100644 index 0000000000000000000000000000000000000000..a5acf29ba05ed17da4179f6991256199dd739f63 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_naivebayes.py @@ -0,0 +1,21 @@ +import unittest + +from nltk.classify.naivebayes import NaiveBayesClassifier + + +class NaiveBayesClassifierTest(unittest.TestCase): + def test_simple(self): + training_features = [ + ({"nice": True, "good": True}, "positive"), + ({"bad": True, "mean": True}, "negative"), + ] + + classifier = NaiveBayesClassifier.train(training_features) + + result = classifier.prob_classify({"nice": True}) + self.assertTrue(result.prob("positive") > result.prob("negative")) + self.assertEqual(result.max(), "positive") + + result = classifier.prob_classify({"bad": True}) + self.assertTrue(result.prob("positive") < result.prob("negative")) + self.assertEqual(result.max(), "negative") diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_nombank.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_nombank.py new file mode 100644 index 0000000000000000000000000000000000000000..395d7bb3cab90c00ae3775faa15092a343d929a2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_nombank.py @@ -0,0 +1,27 @@ +""" +Unit tests for nltk.corpus.nombank +""" + +import unittest + +from nltk.corpus import nombank + +# Load the nombank once. +nombank.nouns() + + +class NombankDemo(unittest.TestCase): + def test_numbers(self): + # No. of instances. + self.assertEqual(len(nombank.instances()), 114574) + # No. of rolesets + self.assertEqual(len(nombank.rolesets()), 5577) + # No. of nouns. + self.assertEqual(len(nombank.nouns()), 4704) + + def test_instance(self): + self.assertEqual(nombank.instances()[0].roleset, "perc-sign.01") + + def test_framefiles_fileids(self): + self.assertEqual(len(nombank.fileids()), 4705) + self.assertTrue(all(fileid.endswith(".xml") for fileid in nombank.fileids())) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_pl196x.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_pl196x.py new file mode 100644 index 0000000000000000000000000000000000000000..e175f8dc0061a983af011010e48fe9567c9d314a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_pl196x.py @@ -0,0 +1,13 @@ +import unittest + +import nltk +from nltk.corpus.reader import pl196x + + +class TestCorpusViews(unittest.TestCase): + def test_corpus_reader(self): + pl196x_dir = nltk.data.find("corpora/pl196x") + pl = pl196x.Pl196xCorpusReader( + pl196x_dir, r".*\.xml", textids="textids.txt", cat_file="cats.txt" + ) + pl.tagged_words(fileids=pl.fileids(), categories="cats.txt") diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_pos_tag.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_pos_tag.py new file mode 100644 index 0000000000000000000000000000000000000000..4e2dc20967969ec2cb38ea925906886ef50a7a69 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_pos_tag.py @@ -0,0 +1,83 @@ +""" +Tests for nltk.pos_tag +""" + + +import unittest + +from nltk import pos_tag, word_tokenize + + +class TestPosTag(unittest.TestCase): + def test_pos_tag_eng(self): + text = "John's big idea isn't all that bad." + expected_tagged = [ + ("John", "NNP"), + ("'s", "POS"), + ("big", "JJ"), + ("idea", "NN"), + ("is", "VBZ"), + ("n't", "RB"), + ("all", "PDT"), + ("that", "DT"), + ("bad", "JJ"), + (".", "."), + ] + assert pos_tag(word_tokenize(text)) == expected_tagged + + def test_pos_tag_eng_universal(self): + text = "John's big idea isn't all that bad." 
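+ # With tagset="universal", pos_tag maps the default Penn Treebank tags + # onto the coarser Universal tagset (e.g. NNP -> NOUN, VBZ -> VERB).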
+ expected_tagged = [ + ("John", "NOUN"), + ("'s", "PRT"), + ("big", "ADJ"), + ("idea", "NOUN"), + ("is", "VERB"), + ("n't", "ADV"), + ("all", "DET"), + ("that", "DET"), + ("bad", "ADJ"), + (".", "."), + ] + assert pos_tag(word_tokenize(text), tagset="universal") == expected_tagged + + def test_pos_tag_rus(self): + text = "Илья оторопел и дважды перечитал бумажку." + expected_tagged = [ + ("Илья", "S"), + ("оторопел", "V"), + ("и", "CONJ"), + ("дважды", "ADV"), + ("перечитал", "V"), + ("бумажку", "S"), + (".", "NONLEX"), + ] + assert pos_tag(word_tokenize(text), lang="rus") == expected_tagged + + def test_pos_tag_rus_universal(self): + text = "Илья оторопел и дважды перечитал бумажку." + expected_tagged = [ + ("Илья", "NOUN"), + ("оторопел", "VERB"), + ("и", "CONJ"), + ("дважды", "ADV"), + ("перечитал", "VERB"), + ("бумажку", "NOUN"), + (".", "."), + ] + assert ( + pos_tag(word_tokenize(text), tagset="universal", lang="rus") + == expected_tagged + ) + + def test_pos_tag_unknown_lang(self): + text = "모르겠 습니 다" + self.assertRaises(NotImplementedError, pos_tag, word_tokenize(text), lang="kor") + # Test for default kwarg, `lang=None` + self.assertRaises(NotImplementedError, pos_tag, word_tokenize(text), lang=None) + + def test_unspecified_lang(self): + # Tries to force the lang='eng' option. + text = "모르겠 습니 다" + expected_but_wrong = [("모르겠", "JJ"), ("습니", "NNP"), ("다", "NN")] + assert pos_tag(word_tokenize(text)) == expected_but_wrong diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_ribes.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_ribes.py new file mode 100644 index 0000000000000000000000000000000000000000..f1efcdad195766451423e721e2f09242e8bf7de5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_ribes.py @@ -0,0 +1,246 @@ +from nltk.translate.ribes_score import corpus_ribes, word_rank_alignment + + +def test_ribes_empty_worder(): # worder as in word order + # Verifies that these two sentences have no alignment, + # and hence have the lowest possible RIBES score. + hyp = "This is a nice sentence which I quite like".split() + ref = "Okay well that's neat and all but the reference's different".split() + + assert word_rank_alignment(ref, hyp) == [] + + list_of_refs = [[ref]] + hypotheses = [hyp] + assert corpus_ribes(list_of_refs, hypotheses) == 0.0 + + +def test_ribes_one_worder(): + # Verifies that these two sentences have just one match, + # and the RIBES score for this sentence with very little + # correspondence is 0. + hyp = "This is a nice sentence which I quite like".split() + ref = "Okay well that's nice and all but the reference's different".split() + + assert word_rank_alignment(ref, hyp) == [3] + + list_of_refs = [[ref]] + hypotheses = [hyp] + assert corpus_ribes(list_of_refs, hypotheses) == 0.0 + + +def test_ribes_two_worder(): + # Verifies that these two sentences have two matches, + # but still get the lowest possible RIBES score due + # to the lack of similarity. 
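+ # (The two matched words come out in fully inverted order, so the rank + # correlation that RIBES is built on is at its minimum.)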
+ hyp = "This is a nice sentence which I quite like".split() + ref = "Okay well that's nice and all but the reference is different".split() + + assert word_rank_alignment(ref, hyp) == [9, 3] + + list_of_refs = [[ref]] + hypotheses = [hyp] + assert corpus_ribes(list_of_refs, hypotheses) == 0.0 + + +def test_ribes(): + # Based on the doctest of the corpus_ribes function + hyp1 = [ + "It", + "is", + "a", + "guide", + "to", + "action", + "which", + "ensures", + "that", + "the", + "military", + "always", + "obeys", + "the", + "commands", + "of", + "the", + "party", + ] + ref1a = [ + "It", + "is", + "a", + "guide", + "to", + "action", + "that", + "ensures", + "that", + "the", + "military", + "will", + "forever", + "heed", + "Party", + "commands", + ] + ref1b = [ + "It", + "is", + "the", + "guiding", + "principle", + "which", + "guarantees", + "the", + "military", + "forces", + "always", + "being", + "under", + "the", + "command", + "of", + "the", + "Party", + ] + ref1c = [ + "It", + "is", + "the", + "practical", + "guide", + "for", + "the", + "army", + "always", + "to", + "heed", + "the", + "directions", + "of", + "the", + "party", + ] + + hyp2 = [ + "he", + "read", + "the", + "book", + "because", + "he", + "was", + "interested", + "in", + "world", + "history", + ] + ref2a = [ + "he", + "was", + "interested", + "in", + "world", + "history", + "because", + "he", + "read", + "the", + "book", + ] + + list_of_refs = [[ref1a, ref1b, ref1c], [ref2a]] + hypotheses = [hyp1, hyp2] + + score = corpus_ribes(list_of_refs, hypotheses) + + assert round(score, 4) == 0.3597 + + +def test_no_zero_div(): + # Regression test for Issue 2529, assure that no ZeroDivisionError is thrown. + hyp1 = [ + "It", + "is", + "a", + "guide", + "to", + "action", + "which", + "ensures", + "that", + "the", + "military", + "always", + "obeys", + "the", + "commands", + "of", + "the", + "party", + ] + ref1a = [ + "It", + "is", + "a", + "guide", + "to", + "action", + "that", + "ensures", + "that", + "the", + "military", + "will", + "forever", + "heed", + "Party", + "commands", + ] + ref1b = [ + "It", + "is", + "the", + "guiding", + "principle", + "which", + "guarantees", + "the", + "military", + "forces", + "always", + "being", + "under", + "the", + "command", + "of", + "the", + "Party", + ] + ref1c = [ + "It", + "is", + "the", + "practical", + "guide", + "for", + "the", + "army", + "always", + "to", + "heed", + "the", + "directions", + "of", + "the", + "party", + ] + + hyp2 = ["he", "read", "the"] + ref2a = ["he", "was", "interested", "in", "world", "history", "because", "he"] + + list_of_refs = [[ref1a, ref1b, ref1c], [ref2a]] + hypotheses = [hyp1, hyp2] + + score = corpus_ribes(list_of_refs, hypotheses) + + assert round(score, 4) == 0.1688 diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_senna.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_senna.py new file mode 100644 index 0000000000000000000000000000000000000000..067f9e30c09a4b963b01cb0c825741a208005874 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_senna.py @@ -0,0 +1,112 @@ +""" +Unit tests for Senna +""" + +import unittest +from os import environ, path, sep + +from nltk.classify import Senna +from nltk.tag import SennaChunkTagger, SennaNERTagger, SennaTagger + +# Set Senna executable path for tests if it is not specified as an environment variable +if "SENNA" in environ: + SENNA_EXECUTABLE_PATH = path.normpath(environ["SENNA"]) + sep +else: + SENNA_EXECUTABLE_PATH = "/usr/share/senna-v3.0" + 
+senna_is_installed = path.exists(SENNA_EXECUTABLE_PATH) + + +@unittest.skipUnless(senna_is_installed, "Requires Senna executable") +class TestSennaPipeline(unittest.TestCase): + """Unittest for nltk.classify.senna""" + + def test_senna_pipeline(self): + """Senna pipeline interface""" + + pipeline = Senna(SENNA_EXECUTABLE_PATH, ["pos", "chk", "ner"]) + sent = "Dusseldorf is an international business center".split() + result = [ + (token["word"], token["chk"], token["ner"], token["pos"]) + for token in pipeline.tag(sent) + ] + expected = [ + ("Dusseldorf", "B-NP", "B-LOC", "NNP"), + ("is", "B-VP", "O", "VBZ"), + ("an", "B-NP", "O", "DT"), + ("international", "I-NP", "O", "JJ"), + ("business", "I-NP", "O", "NN"), + ("center", "I-NP", "O", "NN"), + ] + self.assertEqual(result, expected) + + +@unittest.skipUnless(senna_is_installed, "Requires Senna executable") +class TestSennaTagger(unittest.TestCase): + """Unittest for nltk.tag.senna""" + + def test_senna_tagger(self): + tagger = SennaTagger(SENNA_EXECUTABLE_PATH) + result = tagger.tag("What is the airspeed of an unladen swallow ?".split()) + expected = [ + ("What", "WP"), + ("is", "VBZ"), + ("the", "DT"), + ("airspeed", "NN"), + ("of", "IN"), + ("an", "DT"), + ("unladen", "NN"), + ("swallow", "NN"), + ("?", "."), + ] + self.assertEqual(result, expected) + + def test_senna_chunk_tagger(self): + chktagger = SennaChunkTagger(SENNA_EXECUTABLE_PATH) + result_1 = chktagger.tag("What is the airspeed of an unladen swallow ?".split()) + expected_1 = [ + ("What", "B-NP"), + ("is", "B-VP"), + ("the", "B-NP"), + ("airspeed", "I-NP"), + ("of", "B-PP"), + ("an", "B-NP"), + ("unladen", "I-NP"), + ("swallow", "I-NP"), + ("?", "O"), + ] + + result_2 = list(chktagger.bio_to_chunks(result_1, chunk_type="NP")) + expected_2 = [ + ("What", "0"), + ("the airspeed", "2-3"), + ("an unladen swallow", "5-6-7"), + ] + self.assertEqual(result_1, expected_1) + self.assertEqual(result_2, expected_2) + + def test_senna_ner_tagger(self): + nertagger = SennaNERTagger(SENNA_EXECUTABLE_PATH) + result_1 = nertagger.tag("Shakespeare theatre was in London .".split()) + expected_1 = [ + ("Shakespeare", "B-PER"), + ("theatre", "O"), + ("was", "O"), + ("in", "O"), + ("London", "B-LOC"), + (".", "O"), + ] + + result_2 = nertagger.tag("UN headquarters are in NY , USA .".split()) + expected_2 = [ + ("UN", "B-ORG"), + ("headquarters", "O"), + ("are", "O"), + ("in", "O"), + ("NY", "B-LOC"), + (",", "O"), + ("USA", "B-LOC"), + (".", "O"), + ] + self.assertEqual(result_1, expected_1) + self.assertEqual(result_2, expected_2) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_stem.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_stem.py new file mode 100644 index 0000000000000000000000000000000000000000..0b0b0404ece1cd64a7539967a2d36e8d80ab5cea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_stem.py @@ -0,0 +1,157 @@ +import unittest +from contextlib import closing + +from nltk import data +from nltk.stem.porter import PorterStemmer +from nltk.stem.snowball import SnowballStemmer + + +class SnowballTest(unittest.TestCase): + def test_arabic(self): + """ + Unit tests for the Snowball Arabic light stemmer, which handles + both prefixes and suffixes. + """ + # Test where the ignore_stopwords=True.
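+ # (The second positional argument to SnowballStemmer is ignore_stopwords.)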
+ ar_stemmer = SnowballStemmer("arabic", True) + assert ar_stemmer.stem("الْعَرَبِــــــيَّة") == "عرب" + assert ar_stemmer.stem("العربية") == "عرب" + assert ar_stemmer.stem("فقالوا") == "قال" + assert ar_stemmer.stem("الطالبات") == "طالب" + assert ar_stemmer.stem("فالطالبات") == "طالب" + assert ar_stemmer.stem("والطالبات") == "طالب" + assert ar_stemmer.stem("الطالبون") == "طالب" + assert ar_stemmer.stem("اللذان") == "اللذان" + assert ar_stemmer.stem("من") == "من" + # Test where the ignore_stopwords=False. + ar_stemmer = SnowballStemmer("arabic", False) + assert ar_stemmer.stem("اللذان") == "اللذ" # this is a stop word + assert ar_stemmer.stem("الطالبات") == "طالب" + assert ar_stemmer.stem("الكلمات") == "كلم" + # test where create the arabic stemmer without given init value to ignore_stopwords + ar_stemmer = SnowballStemmer("arabic") + assert ar_stemmer.stem("الْعَرَبِــــــيَّة") == "عرب" + assert ar_stemmer.stem("العربية") == "عرب" + assert ar_stemmer.stem("فقالوا") == "قال" + assert ar_stemmer.stem("الطالبات") == "طالب" + assert ar_stemmer.stem("الكلمات") == "كلم" + + def test_russian(self): + stemmer_russian = SnowballStemmer("russian") + assert stemmer_russian.stem("авантненькая") == "авантненьк" + + def test_german(self): + stemmer_german = SnowballStemmer("german") + stemmer_german2 = SnowballStemmer("german", ignore_stopwords=True) + + assert stemmer_german.stem("Schr\xe4nke") == "schrank" + assert stemmer_german2.stem("Schr\xe4nke") == "schrank" + + assert stemmer_german.stem("keinen") == "kein" + assert stemmer_german2.stem("keinen") == "keinen" + + def test_spanish(self): + stemmer = SnowballStemmer("spanish") + + assert stemmer.stem("Visionado") == "vision" + + # The word 'algue' was raising an IndexError + assert stemmer.stem("algue") == "algu" + + def test_short_strings_bug(self): + stemmer = SnowballStemmer("english") + assert stemmer.stem("y's") == "y" + + +class PorterTest(unittest.TestCase): + def _vocabulary(self): + with closing( + data.find("stemmers/porter_test/porter_vocabulary.txt").open( + encoding="utf-8" + ) + ) as fp: + return fp.read().splitlines() + + def _test_against_expected_output(self, stemmer_mode, expected_stems): + stemmer = PorterStemmer(mode=stemmer_mode) + for word, true_stem in zip(self._vocabulary(), expected_stems): + our_stem = stemmer.stem(word) + assert ( + our_stem == true_stem + ), "{} should stem to {} in {} mode but got {}".format( + word, + true_stem, + stemmer_mode, + our_stem, + ) + + def test_vocabulary_martin_mode(self): + """Tests all words from the test vocabulary provided by M Porter + + The sample vocabulary and output were sourced from + https://tartarus.org/martin/PorterStemmer/voc.txt and + https://tartarus.org/martin/PorterStemmer/output.txt + and are linked to from the Porter Stemmer algorithm's homepage + at https://tartarus.org/martin/PorterStemmer/ + """ + with closing( + data.find("stemmers/porter_test/porter_martin_output.txt").open( + encoding="utf-8" + ) + ) as fp: + self._test_against_expected_output( + PorterStemmer.MARTIN_EXTENSIONS, fp.read().splitlines() + ) + + def test_vocabulary_nltk_mode(self): + with closing( + data.find("stemmers/porter_test/porter_nltk_output.txt").open( + encoding="utf-8" + ) + ) as fp: + self._test_against_expected_output( + PorterStemmer.NLTK_EXTENSIONS, fp.read().splitlines() + ) + + def test_vocabulary_original_mode(self): + # The list of stems for this test was generated by taking the + # Martin-blessed stemmer from + # https://tartarus.org/martin/PorterStemmer/c.txt + # and removing 
all the --DEPARTURE-- sections from it and + # running it against Martin's test vocabulary. + + with closing( + data.find("stemmers/porter_test/porter_original_output.txt").open( + encoding="utf-8" + ) + ) as fp: + self._test_against_expected_output( + PorterStemmer.ORIGINAL_ALGORITHM, fp.read().splitlines() + ) + + self._test_against_expected_output( + PorterStemmer.ORIGINAL_ALGORITHM, + data.find("stemmers/porter_test/porter_original_output.txt") + .open(encoding="utf-8") + .read() + .splitlines(), + ) + + def test_oed_bug(self): + """Test for bug https://github.com/nltk/nltk/issues/1581 + + Ensures that 'oed' can be stemmed without throwing an error. + """ + assert PorterStemmer().stem("oed") == "o" + + def test_lowercase_option(self): + """Test for improvement on https://github.com/nltk/nltk/issues/2507 + + Ensures that stems are lowercased when `to_lowercase=True` + """ + porter = PorterStemmer() + assert porter.stem("On") == "on" + assert porter.stem("I") == "i" + assert porter.stem("I", to_lowercase=False) == "I" + assert porter.stem("Github") == "github" + assert porter.stem("Github", to_lowercase=False) == "Github" diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_tag.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_tag.py new file mode 100644 index 0000000000000000000000000000000000000000..2074b1bbc5f11e06d6a30e741e6618888b7b7511 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_tag.py @@ -0,0 +1,23 @@ +def test_basic(): + from nltk.tag import pos_tag + from nltk.tokenize import word_tokenize + + result = pos_tag(word_tokenize("John's big idea isn't all that bad.")) + assert result == [ + ("John", "NNP"), + ("'s", "POS"), + ("big", "JJ"), + ("idea", "NN"), + ("is", "VBZ"), + ("n't", "RB"), + ("all", "PDT"), + ("that", "DT"), + ("bad", "JJ"), + (".", "."), + ] + + +def setup_module(module): + import pytest + + pytest.importorskip("numpy") diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_tgrep.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_tgrep.py new file mode 100644 index 0000000000000000000000000000000000000000..bf3c08bb7a034748f3e4b70273e8c171b45f4183 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_tgrep.py @@ -0,0 +1,780 @@ +#!/usr/bin/env python +# +# Natural Language Toolkit: TGrep search +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Will Roberts +# URL: +# For license information, see LICENSE.TXT + +""" +Unit tests for nltk.tgrep. +""" + + +import unittest + +from nltk import tgrep +from nltk.tree import ParentedTree + + +class TestSequenceFunctions(unittest.TestCase): + + """ + Class containing unit tests for nltk.tgrep. + """ + + def test_tokenize_simple(self): + """ + Simple test of tokenization. + """ + tokens = tgrep.tgrep_tokenize("A .. (B !< C . D) | ![<< (E , F) $ G]") + self.assertEqual( + tokens, + [ + "A", + "..", + "(", + "B", + "!", + "<", + "C", + ".", + "D", + ")", + "|", + "!", + "[", + "<<", + "(", + "E", + ",", + "F", + ")", + "$", + "G", + "]", + ], + ) + + def test_tokenize_encoding(self): + """ + Test that tokenization handles bytes and strs the same way. + """ + self.assertEqual( + tgrep.tgrep_tokenize(b"A .. (B !< C . D) | ![<< (E , F) $ G]"), + tgrep.tgrep_tokenize("A .. (B !< C . D) | ![<< (E , F) $ G]"), + ) + + def test_tokenize_link_types(self): + """ + Test tokenization of basic link types. 
+ """ + self.assertEqual(tgrep.tgrep_tokenize("AB"), ["A", ">", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<3B"), ["A", "<3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>3B"), ["A", ">3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<,B"), ["A", "<,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>,B"), ["A", ">,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<-3B"), ["A", "<-3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>-3B"), ["A", ">-3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<-B"), ["A", "<-", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>-B"), ["A", ">-", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<'B"), ["A", "<'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>'B"), ["A", ">'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<:B"), ["A", "<:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>:B"), ["A", ">:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<>B"), ["A", ">>", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<<,B"), ["A", "<<,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>>,B"), ["A", ">>,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<<'B"), ["A", "<<'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>>'B"), ["A", ">>'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<<:B"), ["A", "<<:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>>:B"), ["A", ">>:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A.B"), ["A", ".", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A,B"), ["A", ",", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A..B"), ["A", "..", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A,,B"), ["A", ",,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A$B"), ["A", "$", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A$.B"), ["A", "$.", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A$,B"), ["A", "$,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A$..B"), ["A", "$..", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A$,,B"), ["A", "$,,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!B"), ["A", "!", ">", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<3B"), ["A", "!", "<3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>3B"), ["A", "!", ">3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<,B"), ["A", "!", "<,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>,B"), ["A", "!", ">,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<-3B"), ["A", "!", "<-3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>-3B"), ["A", "!", ">-3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<-B"), ["A", "!", "<-", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>-B"), ["A", "!", ">-", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<'B"), ["A", "!", "<'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>'B"), ["A", "!", ">'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<:B"), ["A", "!", "<:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>:B"), ["A", "!", ">:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<>B"), ["A", "!", ">>", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<<,B"), ["A", "!", "<<,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>>,B"), ["A", "!", ">>,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<<'B"), ["A", "!", "<<'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>>'B"), ["A", "!", ">>'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<<:B"), ["A", "!", "<<:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>>:B"), ["A", "!", ">>:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!.B"), ["A", "!", ".", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!,B"), 
["A", "!", ",", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!..B"), ["A", "!", "..", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!,,B"), ["A", "!", ",,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!$B"), ["A", "!", "$", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!$.B"), ["A", "!", "$.", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!$,B"), ["A", "!", "$,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!$..B"), ["A", "!", "$..", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!$,,B"), ["A", "!", "$,,", "B"]) + + def test_tokenize_examples(self): + """ + Test tokenization of the TGrep2 manual example patterns. + """ + self.assertEqual(tgrep.tgrep_tokenize("NP < PP"), ["NP", "<", "PP"]) + self.assertEqual(tgrep.tgrep_tokenize("/^NP/"), ["/^NP/"]) + self.assertEqual( + tgrep.tgrep_tokenize("NP << PP . VP"), ["NP", "<<", "PP", ".", "VP"] + ) + self.assertEqual( + tgrep.tgrep_tokenize("NP << PP | . VP"), ["NP", "<<", "PP", "|", ".", "VP"] + ) + self.assertEqual( + tgrep.tgrep_tokenize("NP !<< PP [> NP | >> VP]"), + ["NP", "!", "<<", "PP", "[", ">", "NP", "|", ">>", "VP", "]"], + ) + self.assertEqual( + tgrep.tgrep_tokenize("NP << (PP . VP)"), + ["NP", "<<", "(", "PP", ".", "VP", ")"], + ) + self.assertEqual( + tgrep.tgrep_tokenize("NP <' (PP <, (IN < on))"), + ["NP", "<'", "(", "PP", "<,", "(", "IN", "<", "on", ")", ")"], + ) + self.assertEqual( + tgrep.tgrep_tokenize("S < (A < B) < C"), + ["S", "<", "(", "A", "<", "B", ")", "<", "C"], + ) + self.assertEqual( + tgrep.tgrep_tokenize("S < ((A < B) < C)"), + ["S", "<", "(", "(", "A", "<", "B", ")", "<", "C", ")"], + ) + self.assertEqual( + tgrep.tgrep_tokenize("S < (A < B < C)"), + ["S", "<", "(", "A", "<", "B", "<", "C", ")"], + ) + self.assertEqual(tgrep.tgrep_tokenize("A3B"3B"', "<", "C"], + ) + + def test_tokenize_nodenames(self): + """ + Test tokenization of node names. + """ + self.assertEqual(tgrep.tgrep_tokenize("Robert"), ["Robert"]) + self.assertEqual(tgrep.tgrep_tokenize("/^[Bb]ob/"), ["/^[Bb]ob/"]) + self.assertEqual(tgrep.tgrep_tokenize("*"), ["*"]) + self.assertEqual(tgrep.tgrep_tokenize("__"), ["__"]) + # test tokenization of NLTK tree position syntax + self.assertEqual(tgrep.tgrep_tokenize("N()"), ["N(", ")"]) + self.assertEqual(tgrep.tgrep_tokenize("N(0,)"), ["N(", "0", ",", ")"]) + self.assertEqual(tgrep.tgrep_tokenize("N(0,0)"), ["N(", "0", ",", "0", ")"]) + self.assertEqual( + tgrep.tgrep_tokenize("N(0,0,)"), ["N(", "0", ",", "0", ",", ")"] + ) + + def test_tokenize_macros(self): + """ + Test tokenization of macro definitions. + """ + self.assertEqual( + tgrep.tgrep_tokenize( + "@ NP /^NP/;\n@ NN /^NN/;\n@NP [!< NP | < @NN] !$.. @NN" + ), + [ + "@", + "NP", + "/^NP/", + ";", + "@", + "NN", + "/^NN/", + ";", + "@NP", + "[", + "!", + "<", + "NP", + "|", + "<", + "@NN", + "]", + "!", + "$..", + "@NN", + ], + ) + + def test_node_simple(self): + """ + Test a simple use of tgrep for finding nodes matching a given + pattern. 
+ """ + tree = ParentedTree.fromstring( + "(S (NP (DT the) (JJ big) (NN dog)) " "(VP bit) (NP (DT a) (NN cat)))" + ) + self.assertEqual(list(tgrep.tgrep_positions("NN", [tree])), [[(0, 2), (2, 1)]]) + self.assertEqual( + list(tgrep.tgrep_nodes("NN", [tree])), [[tree[0, 2], tree[2, 1]]] + ) + self.assertEqual( + list(tgrep.tgrep_positions("NN|JJ", [tree])), [[(0, 1), (0, 2), (2, 1)]] + ) + + def test_node_printing(self): + """Test that the tgrep print operator ' is properly ignored.""" + tree = ParentedTree.fromstring("(S (n x) (N x))") + self.assertEqual( + list(tgrep.tgrep_positions("N", [tree])), + list(tgrep.tgrep_positions("'N", [tree])), + ) + self.assertEqual( + list(tgrep.tgrep_positions("/[Nn]/", [tree])), + list(tgrep.tgrep_positions("'/[Nn]/", [tree])), + ) + + def test_node_encoding(self): + """ + Test that tgrep search strings handles bytes and strs the same + way. + """ + tree = ParentedTree.fromstring( + "(S (NP (DT the) (JJ big) (NN dog)) " "(VP bit) (NP (DT a) (NN cat)))" + ) + self.assertEqual( + list(tgrep.tgrep_positions(b"NN", [tree])), + list(tgrep.tgrep_positions(b"NN", [tree])), + ) + self.assertEqual( + list(tgrep.tgrep_nodes(b"NN", [tree])), + list(tgrep.tgrep_nodes("NN", [tree])), + ) + self.assertEqual( + list(tgrep.tgrep_positions(b"NN|JJ", [tree])), + list(tgrep.tgrep_positions("NN|JJ", [tree])), + ) + + def test_node_nocase(self): + """ + Test selecting nodes using case insensitive node names. + """ + tree = ParentedTree.fromstring("(S (n x) (N x))") + self.assertEqual(list(tgrep.tgrep_positions('"N"', [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions('i@"N"', [tree])), [[(0,), (1,)]]) + + def test_node_quoted(self): + """ + Test selecting nodes using quoted node names. + """ + tree = ParentedTree.fromstring('(N ("N" x) (N" x) ("\\" x))') + self.assertEqual(list(tgrep.tgrep_positions('"N"', [tree])), [[()]]) + self.assertEqual(list(tgrep.tgrep_positions('"\\"N\\""', [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions('"N\\""', [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions('"\\"\\\\\\""', [tree])), [[(2,)]]) + + def test_node_regex(self): + """ + Test regex matching on nodes. + """ + tree = ParentedTree.fromstring("(S (NP-SBJ x) (NP x) (NNP x) (VP x))") + # This is a regular expression that matches any node whose + # name starts with NP, including NP-SBJ: + self.assertEqual(list(tgrep.tgrep_positions("/^NP/", [tree])), [[(0,), (1,)]]) + + def test_node_regex_2(self): + """ + Test regex matching on nodes. + """ + tree = ParentedTree.fromstring("(S (SBJ x) (SBJ1 x) (NP-SBJ x))") + self.assertEqual(list(tgrep.tgrep_positions("/^SBJ/", [tree])), [[(0,), (1,)]]) + # This is a regular expression that matches any node whose + # name includes SBJ, including NP-SBJ: + self.assertEqual( + list(tgrep.tgrep_positions("/SBJ/", [tree])), [[(0,), (1,), (2,)]] + ) + + def test_node_tree_position(self): + """ + Test matching on nodes based on NLTK tree position. 
+ """ + tree = ParentedTree.fromstring("(S (NP-SBJ x) (NP x) (NNP x) (VP x))") + # test all tree positions that are not leaves + leaf_positions = {tree.leaf_treeposition(x) for x in range(len(tree.leaves()))} + tree_positions = [x for x in tree.treepositions() if x not in leaf_positions] + for position in tree_positions: + node_id = f"N{position}" + tgrep_positions = list(tgrep.tgrep_positions(node_id, [tree])) + self.assertEqual(len(tgrep_positions[0]), 1) + self.assertEqual(tgrep_positions[0][0], position) + + def test_node_noleaves(self): + """ + Test node name matching with the search_leaves flag set to False. + """ + tree = ParentedTree.fromstring("(S (A (T x)) (B (N x)))") + self.assertEqual( + list(tgrep.tgrep_positions("x", [tree])), [[(0, 0, 0), (1, 0, 0)]] + ) + self.assertEqual(list(tgrep.tgrep_positions("x", [tree], False)), [[]]) + + def tests_rel_dominance(self): + """ + Test matching nodes based on dominance relations. + """ + tree = ParentedTree.fromstring("(S (A (T x)) (B (N x)))") + self.assertEqual(list(tgrep.tgrep_positions("* < T", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* < T > S", [tree])), [[(0,)]]) + self.assertEqual( + list(tgrep.tgrep_positions("* !< T", [tree])), + [[(), (0, 0), (0, 0, 0), (1,), (1, 0), (1, 0, 0)]], + ) + self.assertEqual(list(tgrep.tgrep_positions("* !< T > S", [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* > A", [tree])), [[(0, 0)]]) + self.assertEqual(list(tgrep.tgrep_positions("* > B", [tree])), [[(1, 0)]]) + self.assertEqual( + list(tgrep.tgrep_positions("* !> B", [tree])), + [[(), (0,), (0, 0), (0, 0, 0), (1,), (1, 0, 0)]], + ) + self.assertEqual( + list(tgrep.tgrep_positions("* !> B >> S", [tree])), [[(0,), (0, 0), (1,)]] + ) + self.assertEqual( + list(tgrep.tgrep_positions("* >> S", [tree])), + [[(0,), (0, 0), (1,), (1, 0)]], + ) + self.assertEqual( + list(tgrep.tgrep_positions("* >>, S", [tree])), [[(0,), (0, 0)]] + ) + self.assertEqual( + list(tgrep.tgrep_positions("* >>' S", [tree])), [[(1,), (1, 0)]] + ) + # Known issue: + # self.assertEqual(list(tgrep.tgrep_positions('* !>> S', [tree])), + # [[()]]) + self.assertEqual(list(tgrep.tgrep_positions("* << T", [tree])), [[(), (0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <<' T", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <<1 N", [tree])), [[(1,)]]) + self.assertEqual( + list(tgrep.tgrep_positions("* !<< T", [tree])), + [[(0, 0), (0, 0, 0), (1,), (1, 0), (1, 0, 0)]], + ) + tree = ParentedTree.fromstring("(S (A (T x)) (B (T x) (N x )))") + self.assertEqual(list(tgrep.tgrep_positions("* <: T", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* < T", [tree])), [[(0,), (1,)]]) + self.assertEqual( + list(tgrep.tgrep_positions("* !<: T", [tree])), + [[(), (0, 0), (0, 0, 0), (1,), (1, 0), (1, 0, 0), (1, 1), (1, 1, 0)]], + ) + self.assertEqual(list(tgrep.tgrep_positions("* !<: T > S", [tree])), [[(1,)]]) + tree = ParentedTree.fromstring("(S (T (A x) (B x)) (T (C x)))") + self.assertEqual(list(tgrep.tgrep_positions("* >: T", [tree])), [[(1, 0)]]) + self.assertEqual( + list(tgrep.tgrep_positions("* !>: T", [tree])), + [[(), (0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1,), (1, 0, 0)]], + ) + tree = ParentedTree.fromstring( + "(S (A (B (C (D (E (T x))))))" " (A (B (C (D (E (T x))) (N x)))))" + ) + self.assertEqual( + list(tgrep.tgrep_positions("* <<: T", [tree])), + [ + [ + (0,), + (0, 0), + (0, 0, 0), + (0, 0, 0, 0), + (0, 0, 0, 0, 0), + (1, 0, 0, 0), + (1, 0, 0, 0, 0), + ] + ], + ) + 
self.assertEqual( + list(tgrep.tgrep_positions("* >>: A", [tree])), + [ + [ + (0, 0), + (0, 0, 0), + (0, 0, 0, 0), + (0, 0, 0, 0, 0), + (0, 0, 0, 0, 0, 0), + (1, 0), + (1, 0, 0), + ] + ], + ) + + def test_bad_operator(self): + """ + Test error handling of undefined tgrep operators. + """ + tree = ParentedTree.fromstring("(S (A (T x)) (B (N x)))") + self.assertRaises( + tgrep.TgrepException, list, tgrep.tgrep_positions("* >>> S", [tree]) + ) + + def test_comments(self): + """ + Test that comments are correctly filtered out of tgrep search + strings. + """ + tree = ParentedTree.fromstring("(S (NN x) (NP x) (NN x))") + search1 = """ + @ NP /^NP/; + @ NN /^NN/; + @NN + """ + self.assertEqual(list(tgrep.tgrep_positions(search1, [tree])), [[(0,), (2,)]]) + search2 = """ + # macros + @ NP /^NP/; + @ NN /^NN/; + + # search string + @NN + """ + self.assertEqual(list(tgrep.tgrep_positions(search2, [tree])), [[(0,), (2,)]]) + + def test_rel_sister_nodes(self): + """ + Test matching sister nodes in a tree. + """ + tree = ParentedTree.fromstring("(S (A x) (B x) (C x))") + self.assertEqual(list(tgrep.tgrep_positions("* $. B", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* $.. B", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* $, B", [tree])), [[(2,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* $,, B", [tree])), [[(2,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* $ B", [tree])), [[(0,), (2,)]]) + + def tests_rel_indexed_children(self): + """ + Test matching nodes based on their index in their parent node. + """ + tree = ParentedTree.fromstring("(S (A x) (B x) (C x))") + self.assertEqual(list(tgrep.tgrep_positions("* >, S", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* >1 S", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* >2 S", [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* >3 S", [tree])), [[(2,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* >' S", [tree])), [[(2,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* >-1 S", [tree])), [[(2,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* >-2 S", [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* >-3 S", [tree])), [[(0,)]]) + tree = ParentedTree.fromstring( + "(S (D (A x) (B x) (C x)) (E (B x) (C x) (A x)) " "(F (C x) (A x) (B x)))" + ) + self.assertEqual(list(tgrep.tgrep_positions("* <, A", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <1 A", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <2 A", [tree])), [[(2,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <3 A", [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <' A", [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <-1 A", [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <-2 A", [tree])), [[(2,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <-3 A", [tree])), [[(0,)]]) + + def test_rel_precedence(self): + """ + Test matching nodes based on precedence relations. + """ + tree = ParentedTree.fromstring( + "(S (NP (NP (PP x)) (NP (AP x)))" + " (VP (AP (X (PP x)) (Y (AP x))))" + " (NP (RC (NP (AP x)))))" + ) + self.assertEqual( + list(tgrep.tgrep_positions("* . X", [tree])), [[(0,), (0, 1), (0, 1, 0)]] + ) + self.assertEqual( + list(tgrep.tgrep_positions("* . Y", [tree])), [[(1, 0, 0), (1, 0, 0, 0)]] + ) + self.assertEqual( + list(tgrep.tgrep_positions("* .. 
X", [tree])), + [[(0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0)]], + ) + self.assertEqual( + list(tgrep.tgrep_positions("* .. Y", [tree])), + [[(0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1, 0, 0), (1, 0, 0, 0)]], + ) + self.assertEqual( + list(tgrep.tgrep_positions("* , X", [tree])), [[(1, 0, 1), (1, 0, 1, 0)]] + ) + self.assertEqual( + list(tgrep.tgrep_positions("* , Y", [tree])), + [[(2,), (2, 0), (2, 0, 0), (2, 0, 0, 0)]], + ) + self.assertEqual( + list(tgrep.tgrep_positions("* ,, X", [tree])), + [[(1, 0, 1), (1, 0, 1, 0), (2,), (2, 0), (2, 0, 0), (2, 0, 0, 0)]], + ) + self.assertEqual( + list(tgrep.tgrep_positions("* ,, Y", [tree])), + [[(2,), (2, 0), (2, 0, 0), (2, 0, 0, 0)]], + ) + + def test_examples(self): + """ + Test the Basic Examples from the TGrep2 manual. + """ + tree = ParentedTree.fromstring("(S (NP (AP x)) (NP (PP x)))") + # This matches any NP node that immediately dominates a PP: + self.assertEqual(list(tgrep.tgrep_positions("NP < PP", [tree])), [[(1,)]]) + + tree = ParentedTree.fromstring("(S (NP x) (VP x) (NP (PP x)) (VP x))") + # This matches an NP that dominates a PP and is immediately + # followed by a VP: + self.assertEqual(list(tgrep.tgrep_positions("NP << PP . VP", [tree])), [[(2,)]]) + + tree = ParentedTree.fromstring( + "(S (NP (AP x)) (NP (PP x)) " "(NP (DET x) (NN x)) (VP x))" + ) + # This matches an NP that dominates a PP or is immediately + # followed by a VP: + self.assertEqual( + list(tgrep.tgrep_positions("NP << PP | . VP", [tree])), [[(1,), (2,)]] + ) + + tree = ParentedTree.fromstring( + "(S (NP (NP (PP x)) (NP (AP x)))" + " (VP (AP (NP (PP x)) (NP (AP x))))" + " (NP (RC (NP (AP x)))))" + ) + # This matches an NP that does not dominate a PP. Also, the NP + # must either have a parent that is an NP or be dominated by a + # VP: + self.assertEqual( + list(tgrep.tgrep_positions("NP !<< PP [> NP | >> VP]", [tree])), + [[(0, 1), (1, 0, 1)]], + ) + + tree = ParentedTree.fromstring( + "(S (NP (AP (PP x) (VP x))) " "(NP (AP (PP x) (NP x))) (NP x))" + ) + # This matches an NP that dominates a PP which itself is + # immediately followed by a VP. Note the use of parentheses to + # group ". VP" with the PP rather than with the NP: + self.assertEqual( + list(tgrep.tgrep_positions("NP << (PP . VP)", [tree])), [[(0,)]] + ) + + tree = ParentedTree.fromstring( + "(S (NP (DET a) (NN cat) (PP (IN on) (NP x)))" + " (NP (DET a) (NN cat) (PP (IN on) (NP x)) (PP x))" + " (NP x))" + ) + # This matches an NP whose last child is a PP that begins with + # the preposition "on": + self.assertEqual( + list(tgrep.tgrep_positions("NP <' (PP <, (IN < on))", [tree])), [[(0,)]] + ) + + tree = ParentedTree.fromstring( + "(S (S (C x) (A (B x))) (S (C x) (A x)) " "(S (D x) (A (B x))))" + ) + # The following pattern matches an S which has a child A and + # another child that is a C and that the A has a child B: + self.assertEqual( + list(tgrep.tgrep_positions("S < (A < B) < C", [tree])), [[(0,)]] + ) + + tree = ParentedTree.fromstring( + "(S (S (A (B x) (C x))) (S (S (C x) (A (B x)))))" + ) + # However, this pattern means that S has child A and that A + # has children B and C: + self.assertEqual( + list(tgrep.tgrep_positions("S < ((A < B) < C)", [tree])), [[(0,)]] + ) + + # It is equivalent to this: + self.assertEqual( + list(tgrep.tgrep_positions("S < (A < B < C)", [tree])), [[(0,)]] + ) + + def test_use_macros(self): + """ + Test defining and using tgrep2 macros. 
+ """ + tree = ParentedTree.fromstring( + "(VP (VB sold) (NP (DET the) " + "(NN heiress)) (NP (NN deed) (PREP to) " + "(NP (DET the) (NN school) (NN house))))" + ) + self.assertEqual( + list( + tgrep.tgrep_positions( + "@ NP /^NP/;\n@ NN /^NN/;\n@NP !< @NP !$.. @NN", [tree] + ) + ), + [[(1,), (2, 2)]], + ) + # use undefined macro @CNP + self.assertRaises( + tgrep.TgrepException, + list, + tgrep.tgrep_positions( + "@ NP /^NP/;\n@ NN /^NN/;\n@CNP !< @NP !$.. @NN", [tree] + ), + ) + + def test_tokenize_node_labels(self): + """Test tokenization of labeled nodes.""" + self.assertEqual( + tgrep.tgrep_tokenize("S < @SBJ < (@VP < (@VB $.. @OBJ))"), + [ + "S", + "<", + "@SBJ", + "<", + "(", + "@VP", + "<", + "(", + "@VB", + "$..", + "@OBJ", + ")", + ")", + ], + ) + self.assertEqual( + tgrep.tgrep_tokenize("S < @SBJ=s < (@VP=v < (@VB $.. @OBJ))"), + [ + "S", + "<", + "@SBJ", + "=", + "s", + "<", + "(", + "@VP", + "=", + "v", + "<", + "(", + "@VB", + "$..", + "@OBJ", + ")", + ")", + ], + ) + + def test_tokenize_segmented_patterns(self): + """Test tokenization of segmented patterns.""" + self.assertEqual( + tgrep.tgrep_tokenize("S < @SBJ=s < (@VP=v < (@VB $.. @OBJ)) : =s .. =v"), + [ + "S", + "<", + "@SBJ", + "=", + "s", + "<", + "(", + "@VP", + "=", + "v", + "<", + "(", + "@VB", + "$..", + "@OBJ", + ")", + ")", + ":", + "=s", + "..", + "=v", + ], + ) + + def test_labeled_nodes(self): + """ + Test labeled nodes. + + Test case from Emily M. Bender. + """ + search = """ + # macros + @ SBJ /SBJ/; + @ VP /VP/; + @ VB /VB/; + @ VPoB /V[PB]/; + @ OBJ /OBJ/; + + # 1 svo + S < @SBJ=s < (@VP=v < (@VB $.. @OBJ)) : =s .. =v""" + sent1 = ParentedTree.fromstring( + "(S (NP-SBJ I) (VP (VB eat) (NP-OBJ (NNS apples))))" + ) + sent2 = ParentedTree.fromstring( + "(S (VP (VB eat) (NP-OBJ (NNS apples))) (NP-SBJ I))" + ) + search_firsthalf = search.split("\n\n")[0] + "S < @SBJ < (@VP < (@VB $.. @OBJ))" + search_rewrite = "S < (/.*SBJ/ $.. (/VP/ < (/VB/ $.. /.*OBJ/)))" + + self.assertTrue(list(tgrep.tgrep_positions(search_firsthalf, [sent1]))[0]) + self.assertTrue(list(tgrep.tgrep_positions(search, [sent1]))[0]) + self.assertTrue(list(tgrep.tgrep_positions(search_rewrite, [sent1]))[0]) + self.assertEqual( + list(tgrep.tgrep_positions(search, [sent1])), + list(tgrep.tgrep_positions(search_rewrite, [sent1])), + ) + self.assertTrue(list(tgrep.tgrep_positions(search_firsthalf, [sent2]))[0]) + self.assertFalse(list(tgrep.tgrep_positions(search, [sent2]))[0]) + self.assertFalse(list(tgrep.tgrep_positions(search_rewrite, [sent2]))[0]) + self.assertEqual( + list(tgrep.tgrep_positions(search, [sent2])), + list(tgrep.tgrep_positions(search_rewrite, [sent2])), + ) + + def test_multiple_conjs(self): + """ + Test that multiple (3 or more) conjunctions of node relations are + handled properly. + """ + sent = ParentedTree.fromstring("((A (B b) (C c)) (A (B b) (C c) (D d)))") + # search = '(A < B < C < D)' + # search_tworels = '(A < B < C)' + self.assertEqual( + list(tgrep.tgrep_positions("(A < B < C < D)", [sent])), [[(1,)]] + ) + self.assertEqual( + list(tgrep.tgrep_positions("(A < B < C)", [sent])), [[(0,), (1,)]] + ) + + def test_trailing_semicolon(self): + """ + Test that semicolons at the end of a tgrep2 search string won't + cause a parse failure. 
+ """ + tree = ParentedTree.fromstring( + "(S (NP (DT the) (JJ big) (NN dog)) " "(VP bit) (NP (DT a) (NN cat)))" + ) + self.assertEqual(list(tgrep.tgrep_positions("NN", [tree])), [[(0, 2), (2, 1)]]) + self.assertEqual(list(tgrep.tgrep_positions("NN;", [tree])), [[(0, 2), (2, 1)]]) + self.assertEqual( + list(tgrep.tgrep_positions("NN;;", [tree])), [[(0, 2), (2, 1)]] + ) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_twitter_auth.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_twitter_auth.py new file mode 100644 index 0000000000000000000000000000000000000000..5f9a830a0ad0158c6bba26364189fd8ad19907f8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_twitter_auth.py @@ -0,0 +1,77 @@ +""" +Tests for static parts of Twitter package +""" + +import os + +import pytest + +pytest.importorskip("twython") + +from nltk.twitter import Authenticate + + +@pytest.fixture +def auth(): + return Authenticate() + + +class TestCredentials: + """ + Tests that Twitter credentials from a file are handled correctly. + """ + + @classmethod + def setup_class(self): + self.subdir = os.path.join(os.path.dirname(__file__), "files") + os.environ["TWITTER"] = "twitter-files" + + def test_environment(self, auth): + """ + Test that environment variable has been read correctly. + """ + fn = os.path.basename(auth.creds_subdir) + assert fn == os.environ["TWITTER"] + + @pytest.mark.parametrize( + "kwargs", + [ + # Each of the following scenarios should raise an error: + # An empty subdir path + {"subdir": ""}, + # A subdir path of None + {"subdir": None}, + # A nonexistent directory + {"subdir": "/nosuchdir"}, + # 'credentials.txt' is not in default subdir, as read from `os.environ['TWITTER']` + {}, + # Nonexistent credentials file ('foobar') + {"creds_file": "foobar"}, + # 'bad_oauth1-1.txt' is incomplete + {"creds_file": "bad_oauth1-1.txt"}, + # The first key in credentials file 'bad_oauth1-2.txt' is ill-formed + {"creds_file": "bad_oauth1-2.txt"}, + # The first two lines in 'bad_oauth1-3.txt' are collapsed + {"creds_file": "bad_oauth1-3.txt"}, + ], + ) + def test_scenarios_that_should_raise_errors(self, kwargs, auth): + """Various scenarios that should raise errors""" + try: + auth.load_creds(**kwargs) + # raises ValueError (zero length field name in format) for python 2.6 + # OSError for the rest + except (OSError, ValueError): + pass + except Exception as e: + pytest.fail("Unexpected exception thrown: %s" % e) + else: + pytest.fail("OSError exception not thrown.") + + def test_correct_file(self, auth): + """Test that a proper file succeeds and is read correctly""" + oauth = auth.load_creds(subdir=self.subdir) + + assert auth.creds_fullpath == os.path.join(self.subdir, auth.creds_file) + assert auth.creds_file == "credentials.txt" + assert oauth["app_key"] == "a" diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_util.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_util.py new file mode 100644 index 0000000000000000000000000000000000000000..31bb8611d34e52c62a80c459acad93e4d9fe3782 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/test_util.py @@ -0,0 +1,82 @@ +import pytest + +from nltk.util import everygrams + + +@pytest.fixture +def everygram_input(): + """Form test data for tests.""" + return iter(["a", "b", "c"]) + + +def test_everygrams_without_padding(everygram_input): + expected_output = [ + ("a",), + ("a", "b"), + ("a", "b", "c"), + ("b",), + ("b", "c"), + ("c",), + ] + 
output = list(everygrams(everygram_input)) + assert output == expected_output + + +def test_everygrams_max_len(everygram_input): + expected_output = [ + ("a",), + ("a", "b"), + ("b",), + ("b", "c"), + ("c",), + ] + output = list(everygrams(everygram_input, max_len=2)) + assert output == expected_output + + +def test_everygrams_min_len(everygram_input): + expected_output = [ + ("a", "b"), + ("a", "b", "c"), + ("b", "c"), + ] + output = list(everygrams(everygram_input, min_len=2)) + assert output == expected_output + + +def test_everygrams_pad_right(everygram_input): + expected_output = [ + ("a",), + ("a", "b"), + ("a", "b", "c"), + ("b",), + ("b", "c"), + ("b", "c", None), + ("c",), + ("c", None), + ("c", None, None), + (None,), + (None, None), + (None,), + ] + output = list(everygrams(everygram_input, max_len=3, pad_right=True)) + assert output == expected_output + + +def test_everygrams_pad_left(everygram_input): + expected_output = [ + (None,), + (None, None), + (None, None, "a"), + (None,), + (None, "a"), + (None, "a", "b"), + ("a",), + ("a", "b"), + ("a", "b", "c"), + ("b",), + ("b", "c"), + ("c",), + ] + output = list(everygrams(everygram_input, max_len=3, pad_left=True)) + assert output == expected_output diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm3.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3481297271692016f34e14fd1067defce48dfe08 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm3.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm4.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm4.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a6bee8c8c6e13385de3657d6e8be8438fad2e0c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm4.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_meteor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_meteor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..226978ae56d92e3f08e7229aafc7e41f5886d4fd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_meteor.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_nist.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_nist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33b5d005b27b674a357ba2c0c62ecd93f875e0d3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_nist.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_stack_decoder.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_stack_decoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35ffad7a6857b39943afc119ac19a4533ea31997 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_stack_decoder.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_bleu.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_bleu.py new file mode 100644 index 0000000000000000000000000000000000000000..8fa1e07903036885be24a23392ea68c16065dfde --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_bleu.py @@ -0,0 +1,405 @@ +""" +Tests for BLEU translation evaluation metric +""" + +import io +import unittest + +from nltk.data import find +from nltk.translate.bleu_score import ( + SmoothingFunction, + brevity_penalty, + closest_ref_length, + corpus_bleu, + modified_precision, + sentence_bleu, +) + + +class TestBLEU(unittest.TestCase): + def test_modified_precision(self): + """ + Examples from the original BLEU paper + https://www.aclweb.org/anthology/P02-1040.pdf + """ + # Example 1: the "the*" example. + # Reference sentences. + ref1 = "the cat is on the mat".split() + ref2 = "there is a cat on the mat".split() + # Hypothesis sentence(s). + hyp1 = "the the the the the the the".split() + + references = [ref1, ref2] + + # Testing modified unigram precision. + hyp1_unigram_precision = float(modified_precision(references, hyp1, n=1)) + assert round(hyp1_unigram_precision, 4) == 0.2857 + # With assertAlmostEqual at 4 place precision. + self.assertAlmostEqual(hyp1_unigram_precision, 0.28571428, places=4) + + # Testing modified bigram precision. + assert float(modified_precision(references, hyp1, n=2)) == 0.0 + + # Example 2: the "of the" example. + # Reference sentences + ref1 = str( + "It is a guide to action that ensures that the military " + "will forever heed Party commands" + ).split() + ref2 = str( + "It is the guiding principle which guarantees the military " + "forces always being under the command of the Party" + ).split() + ref3 = str( + "It is the practical guide for the army always to heed " + "the directions of the party" + ).split() + # Hypothesis sentence(s). + hyp1 = "of the".split() + + references = [ref1, ref2, ref3] + # Testing modified unigram precision. + assert float(modified_precision(references, hyp1, n=1)) == 1.0 + + # Testing modified bigram precision. + assert float(modified_precision(references, hyp1, n=2)) == 1.0 + + # Example 3: Proper MT outputs. + hyp1 = str( + "It is a guide to action which ensures that the military " + "always obeys the commands of the party" + ).split() + hyp2 = str( + "It is to insure the troops forever hearing the activity " + "guidebook that party direct" + ).split() + + references = [ref1, ref2, ref3] + + # Unigram precision. + hyp1_unigram_precision = float(modified_precision(references, hyp1, n=1)) + hyp2_unigram_precision = float(modified_precision(references, hyp2, n=1)) + # Test unigram precision with assertAlmostEqual at 4 place precision. + self.assertAlmostEqual(hyp1_unigram_precision, 0.94444444, places=4) + self.assertAlmostEqual(hyp2_unigram_precision, 0.57142857, places=4) + # Test unigram precision with rounding. + assert round(hyp1_unigram_precision, 4) == 0.9444 + assert round(hyp2_unigram_precision, 4) == 0.5714 + + # Bigram precision + hyp1_bigram_precision = float(modified_precision(references, hyp1, n=2)) + hyp2_bigram_precision = float(modified_precision(references, hyp2, n=2)) + # Test bigram precision with assertAlmostEqual at 4 place precision. 
+        self.assertAlmostEqual(hyp1_bigram_precision, 0.58823529, places=4)
+        self.assertAlmostEqual(hyp2_bigram_precision, 0.07692307, places=4)
+        # Test bigram precision with rounding.
+        assert round(hyp1_bigram_precision, 4) == 0.5882
+        assert round(hyp2_bigram_precision, 4) == 0.0769
+
+    def test_brevity_penalty(self):
+        # Test cases from the brevity_penalty_closest function in mteval-v13a.pl.
+        # Same test cases as in the doctests of nltk.translate.bleu_score.
+        references = [["a"] * 11, ["a"] * 8]
+        hypothesis = ["a"] * 7
+        hyp_len = len(hypothesis)
+        closest_ref_len = closest_ref_length(references, hyp_len)
+        self.assertAlmostEqual(
+            brevity_penalty(closest_ref_len, hyp_len), 0.8669, places=4
+        )
+
+        references = [["a"] * 11, ["a"] * 8, ["a"] * 6, ["a"] * 7]
+        hypothesis = ["a"] * 7
+        hyp_len = len(hypothesis)
+        closest_ref_len = closest_ref_length(references, hyp_len)
+        assert brevity_penalty(closest_ref_len, hyp_len) == 1.0
+
+    def test_zero_matches(self):
+        # Test case where there are zero n-gram matches.
+        references = ["The candidate has no alignment to any of the references".split()]
+        hypothesis = "John loves Mary".split()
+
+        # Test BLEU to nth order of n-grams, where n is len(hypothesis).
+        for n in range(1, len(hypothesis)):
+            weights = (1.0 / n,) * n  # Uniform weights.
+            assert sentence_bleu(references, hypothesis, weights) == 0
+
+    def test_full_matches(self):
+        # Test case where the hypothesis matches a reference completely.
+        references = ["John loves Mary".split()]
+        hypothesis = "John loves Mary".split()
+
+        # Test BLEU to nth order of n-grams, where n is len(hypothesis).
+        for n in range(1, len(hypothesis)):
+            weights = (1.0 / n,) * n  # Uniform weights.
+            assert sentence_bleu(references, hypothesis, weights) == 1.0
+
+    def test_partial_matches_hypothesis_longer_than_reference(self):
+        references = ["John loves Mary".split()]
+        hypothesis = "John loves Mary who loves Mike".split()
+        # Since no 4-gram matches were found, the result should be zero:
+        # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0
+        self.assertAlmostEqual(sentence_bleu(references, hypothesis), 0.0, places=4)
+        # Check that the warning has been raised because len(reference) < 4.
+        try:
+            self.assertWarns(UserWarning, sentence_bleu, references, hypothesis)
+        except AttributeError:
+            pass  # unittest.TestCase.assertWarns is only supported in Python >= 3.2.
+
+
+# @unittest.skip("Skipping fringe cases for BLEU.")
+class TestBLEUFringeCases(unittest.TestCase):
+    def test_case_where_n_is_bigger_than_hypothesis_length(self):
+        # Test BLEU to nth order of n-grams, where n > len(hypothesis).
+        references = ["John loves Mary ?".split()]
+        hypothesis = "John loves Mary".split()
+        n = len(hypothesis) + 1
+        weights = (1.0 / n,) * n  # Uniform weights.
+        # Since no n-gram matches were found, the result should be zero:
+        # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0
+        self.assertAlmostEqual(
+            sentence_bleu(references, hypothesis, weights), 0.0, places=4
+        )
+        # Check that the warning has been raised because len(hypothesis) < 4.
+        try:
+            self.assertWarns(UserWarning, sentence_bleu, references, hypothesis)
+        except AttributeError:
+            pass  # unittest.TestCase.assertWarns is only supported in Python >= 3.2.
+
+        # Test case where n > len(hypothesis) and also n > len(reference);
+        # this is a special case where reference == hypothesis.
+        references = ["John loves Mary".split()]
+        hypothesis = "John loves Mary".split()
+        # Since no 4-gram matches were found, the result should be zero:
+        # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0
+        self.assertAlmostEqual(
+            sentence_bleu(references, hypothesis, weights), 0.0, places=4
+        )
+
+    def test_empty_hypothesis(self):
+        # Test case where the hypothesis is empty.
+        references = ["The candidate has no alignment to any of the references".split()]
+        hypothesis = []
+        assert sentence_bleu(references, hypothesis) == 0
+
+    def test_length_one_hypothesis(self):
+        # Test case where the hypothesis has length 1, using smoothing method 4.
+        references = ["The candidate has no alignment to any of the references".split()]
+        hypothesis = ["Foo"]
+        method4 = SmoothingFunction().method4
+        try:
+            sentence_bleu(references, hypothesis, smoothing_function=method4)
+        except ValueError:
+            pass  # method4 may raise ValueError for a one-word hypothesis.
+
+    def test_empty_references(self):
+        # Test case where the reference is empty.
+        references = [[]]
+        hypothesis = "John loves Mary".split()
+        assert sentence_bleu(references, hypothesis) == 0
+
+    def test_empty_references_and_hypothesis(self):
+        # Test case where both the references and the hypothesis are empty.
+        references = [[]]
+        hypothesis = []
+        assert sentence_bleu(references, hypothesis) == 0
+
+    def test_reference_or_hypothesis_shorter_than_fourgrams(self):
+        # Test case where the length of the reference or the hypothesis
+        # is shorter than 4.
+        references = ["let it go".split()]
+        hypothesis = "let go it".split()
+        # Check that the BLEU score for this hypothesis/reference pair is 0.0:
+        # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0
+        self.assertAlmostEqual(sentence_bleu(references, hypothesis), 0.0, places=4)
+        # Check that the warning has been raised.
+        try:
+            self.assertWarns(UserWarning, sentence_bleu, references, hypothesis)
+        except AttributeError:
+            pass  # unittest.TestCase.assertWarns is only supported in Python >= 3.2.
+
+
+class TestBLEUvsMteval13a(unittest.TestCase):
+    def test_corpus_bleu(self):
+        ref_file = find("models/wmt15_eval/ref.ru")
+        hyp_file = find("models/wmt15_eval/google.ru")
+        mteval_output_file = find("models/wmt15_eval/mteval-13a.output")
+
+        # Read the BLEU scores from the `mteval-13a.output` file.
+        # The order of the list corresponds to the order of the ngrams.
+        with open(mteval_output_file) as mteval_fin:
+            # The numbers are located in the second-to-last line of the file.
+            # The first and last items on that line are not scores, so they are dropped.
+            mteval_bleu_scores = map(float, mteval_fin.readlines()[-2].split()[1:-1])
+
+        with open(ref_file, encoding="utf8") as ref_fin:
+            with open(hyp_file, encoding="utf8") as hyp_fin:
+                # Whitespace-tokenize the files.
+                # Note: split() automatically strips whitespace.
+                hypothesis = list(map(lambda x: x.split(), hyp_fin))
+                # Note that the corpus_bleu input is a list of lists of references.
+                references = list(map(lambda x: [x.split()], ref_fin))
+                # Without smoothing.
+                for i, mteval_bleu in zip(range(1, 10), mteval_bleu_scores):
+                    nltk_bleu = corpus_bleu(
+                        references, hypothesis, weights=(1.0 / i,) * i
+                    )
+                    # Check that the BLEU score difference is less than 0.005.
+                    # Note: This is an approximate comparison; as much as
+                    # +/- 0.01 BLEU might be "statistically significant",
+                    # the actual translation quality might not be.
+ assert abs(mteval_bleu - nltk_bleu) < 0.005 + + # With the same smoothing method used in mteval-v13a.pl + chencherry = SmoothingFunction() + for i, mteval_bleu in zip(range(1, 10), mteval_bleu_scores): + nltk_bleu = corpus_bleu( + references, + hypothesis, + weights=(1.0 / i,) * i, + smoothing_function=chencherry.method3, + ) + assert abs(mteval_bleu - nltk_bleu) < 0.005 + + +class TestBLEUWithBadSentence(unittest.TestCase): + def test_corpus_bleu_with_bad_sentence(self): + hyp = "Teo S yb , oe uNb , R , T t , , t Tue Ar saln S , , 5istsi l , 5oe R ulO sae oR R" + ref = str( + "Their tasks include changing a pump on the faulty stokehold ." + "Likewise , two species that are very similar in morphology " + "were distinguished using genetics ." + ) + references = [[ref.split()]] + hypotheses = [hyp.split()] + try: # Check that the warning is raised since no. of 2-grams < 0. + with self.assertWarns(UserWarning): + # Verify that the BLEU output is undesired since no. of 2-grams < 0. + self.assertAlmostEqual( + corpus_bleu(references, hypotheses), 0.0, places=4 + ) + except AttributeError: # unittest.TestCase.assertWarns is only supported in Python >= 3.2. + self.assertAlmostEqual(corpus_bleu(references, hypotheses), 0.0, places=4) + + +class TestBLEUWithMultipleWeights(unittest.TestCase): + def test_corpus_bleu_with_multiple_weights(self): + hyp1 = [ + "It", + "is", + "a", + "guide", + "to", + "action", + "which", + "ensures", + "that", + "the", + "military", + "always", + "obeys", + "the", + "commands", + "of", + "the", + "party", + ] + ref1a = [ + "It", + "is", + "a", + "guide", + "to", + "action", + "that", + "ensures", + "that", + "the", + "military", + "will", + "forever", + "heed", + "Party", + "commands", + ] + ref1b = [ + "It", + "is", + "the", + "guiding", + "principle", + "which", + "guarantees", + "the", + "military", + "forces", + "always", + "being", + "under", + "the", + "command", + "of", + "the", + "Party", + ] + ref1c = [ + "It", + "is", + "the", + "practical", + "guide", + "for", + "the", + "army", + "always", + "to", + "heed", + "the", + "directions", + "of", + "the", + "party", + ] + hyp2 = [ + "he", + "read", + "the", + "book", + "because", + "he", + "was", + "interested", + "in", + "world", + "history", + ] + ref2a = [ + "he", + "was", + "interested", + "in", + "world", + "history", + "because", + "he", + "read", + "the", + "book", + ] + weight_1 = (1, 0, 0, 0) + weight_2 = (0.25, 0.25, 0.25, 0.25) + weight_3 = (0, 0, 0, 0, 1) + + bleu_scores = corpus_bleu( + list_of_references=[[ref1a, ref1b, ref1c], [ref2a]], + hypotheses=[hyp1, hyp2], + weights=[weight_1, weight_2, weight_3], + ) + assert bleu_scores[0] == corpus_bleu( + [[ref1a, ref1b, ref1c], [ref2a]], [hyp1, hyp2], weight_1 + ) + assert bleu_scores[1] == corpus_bleu( + [[ref1a, ref1b, ref1c], [ref2a]], [hyp1, hyp2], weight_2 + ) + assert bleu_scores[2] == corpus_bleu( + [[ref1a, ref1b, ref1c], [ref2a]], [hyp1, hyp2], weight_3 + ) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm2.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm2.py new file mode 100644 index 0000000000000000000000000000000000000000..e2194dde9aabd503489e4f961b85da550b56d7c2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm2.py @@ -0,0 +1,86 @@ +""" +Tests for IBM Model 2 training methods +""" + +import unittest +from collections import defaultdict + +from nltk.translate import AlignedSent, IBMModel, IBMModel2 +from 
nltk.translate.ibm_model import AlignmentInfo + + +class TestIBMModel2(unittest.TestCase): + def test_set_uniform_alignment_probabilities(self): + # arrange + corpus = [ + AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), + AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), + ] + model2 = IBMModel2(corpus, 0) + + # act + model2.set_uniform_probabilities(corpus) + + # assert + # expected_prob = 1.0 / (length of source sentence + 1) + self.assertEqual(model2.alignment_table[0][1][3][2], 1.0 / 4) + self.assertEqual(model2.alignment_table[2][4][2][4], 1.0 / 3) + + def test_set_uniform_alignment_probabilities_of_non_domain_values(self): + # arrange + corpus = [ + AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), + AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), + ] + model2 = IBMModel2(corpus, 0) + + # act + model2.set_uniform_probabilities(corpus) + + # assert + # examine i and j values that are not in the training data domain + self.assertEqual(model2.alignment_table[99][1][3][2], IBMModel.MIN_PROB) + self.assertEqual(model2.alignment_table[2][99][2][4], IBMModel.MIN_PROB) + + def test_prob_t_a_given_s(self): + # arrange + src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"] + trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"] + corpus = [AlignedSent(trg_sentence, src_sentence)] + alignment_info = AlignmentInfo( + (0, 1, 4, 0, 2, 5, 5), + [None] + src_sentence, + ["UNUSED"] + trg_sentence, + None, + ) + + translation_table = defaultdict(lambda: defaultdict(float)) + translation_table["i"]["ich"] = 0.98 + translation_table["love"]["gern"] = 0.98 + translation_table["to"][None] = 0.98 + translation_table["eat"]["esse"] = 0.98 + translation_table["smoked"]["räucherschinken"] = 0.98 + translation_table["ham"]["räucherschinken"] = 0.98 + + alignment_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(float))) + ) + alignment_table[0][3][5][6] = 0.97 # None -> to + alignment_table[1][1][5][6] = 0.97 # ich -> i + alignment_table[2][4][5][6] = 0.97 # esse -> eat + alignment_table[4][2][5][6] = 0.97 # gern -> love + alignment_table[5][5][5][6] = 0.96 # räucherschinken -> smoked + alignment_table[5][6][5][6] = 0.96 # räucherschinken -> ham + + model2 = IBMModel2(corpus, 0) + model2.translation_table = translation_table + model2.alignment_table = alignment_table + + # act + probability = model2.prob_t_a_given_s(alignment_info) + + # assert + lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98 + alignment = 0.97 * 0.97 * 0.97 * 0.97 * 0.96 * 0.96 + expected_probability = lexical_translation * alignment + self.assertEqual(round(probability, 4), round(expected_probability, 4)) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm5.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm5.py new file mode 100644 index 0000000000000000000000000000000000000000..7c29c47de230c0e128cb969514787b2ded0451ef --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm5.py @@ -0,0 +1,160 @@ +""" +Tests for IBM Model 5 training methods +""" + +import unittest +from collections import defaultdict + +from nltk.translate import AlignedSent, IBMModel, IBMModel4, IBMModel5 +from nltk.translate.ibm_model import AlignmentInfo + + +class TestIBMModel5(unittest.TestCase): + def test_set_uniform_vacancy_probabilities_of_max_displacements(self): + # arrange + src_classes = {"schinken": 0, "eier": 0, "spam": 1} + 
trg_classes = {"ham": 0, "eggs": 1, "spam": 2} + corpus = [ + AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), + AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), + ] + model5 = IBMModel5(corpus, 0, src_classes, trg_classes) + + # act + model5.set_uniform_probabilities(corpus) + + # assert + # number of vacancy difference values = + # 2 * number of words in longest target sentence + expected_prob = 1.0 / (2 * 4) + + # examine the boundary values for (dv, max_v, trg_class) + self.assertEqual(model5.head_vacancy_table[4][4][0], expected_prob) + self.assertEqual(model5.head_vacancy_table[-3][1][2], expected_prob) + self.assertEqual(model5.non_head_vacancy_table[4][4][0], expected_prob) + self.assertEqual(model5.non_head_vacancy_table[-3][1][2], expected_prob) + + def test_set_uniform_vacancy_probabilities_of_non_domain_values(self): + # arrange + src_classes = {"schinken": 0, "eier": 0, "spam": 1} + trg_classes = {"ham": 0, "eggs": 1, "spam": 2} + corpus = [ + AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), + AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), + ] + model5 = IBMModel5(corpus, 0, src_classes, trg_classes) + + # act + model5.set_uniform_probabilities(corpus) + + # assert + # examine dv and max_v values that are not in the training data domain + self.assertEqual(model5.head_vacancy_table[5][4][0], IBMModel.MIN_PROB) + self.assertEqual(model5.head_vacancy_table[-4][1][2], IBMModel.MIN_PROB) + self.assertEqual(model5.head_vacancy_table[4][0][0], IBMModel.MIN_PROB) + self.assertEqual(model5.non_head_vacancy_table[5][4][0], IBMModel.MIN_PROB) + self.assertEqual(model5.non_head_vacancy_table[-4][1][2], IBMModel.MIN_PROB) + + def test_prob_t_a_given_s(self): + # arrange + src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"] + trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"] + src_classes = {"räucherschinken": 0, "ja": 1, "ich": 2, "esse": 3, "gern": 4} + trg_classes = {"ham": 0, "smoked": 1, "i": 3, "love": 4, "to": 2, "eat": 4} + corpus = [AlignedSent(trg_sentence, src_sentence)] + alignment_info = AlignmentInfo( + (0, 1, 4, 0, 2, 5, 5), + [None] + src_sentence, + ["UNUSED"] + trg_sentence, + [[3], [1], [4], [], [2], [5, 6]], + ) + + head_vacancy_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(float)) + ) + head_vacancy_table[1 - 0][6][3] = 0.97 # ich -> i + head_vacancy_table[3 - 0][5][4] = 0.97 # esse -> eat + head_vacancy_table[1 - 2][4][4] = 0.97 # gern -> love + head_vacancy_table[2 - 0][2][1] = 0.97 # räucherschinken -> smoked + + non_head_vacancy_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(float)) + ) + non_head_vacancy_table[1 - 0][1][0] = 0.96 # räucherschinken -> ham + + translation_table = defaultdict(lambda: defaultdict(float)) + translation_table["i"]["ich"] = 0.98 + translation_table["love"]["gern"] = 0.98 + translation_table["to"][None] = 0.98 + translation_table["eat"]["esse"] = 0.98 + translation_table["smoked"]["räucherschinken"] = 0.98 + translation_table["ham"]["räucherschinken"] = 0.98 + + fertility_table = defaultdict(lambda: defaultdict(float)) + fertility_table[1]["ich"] = 0.99 + fertility_table[1]["esse"] = 0.99 + fertility_table[0]["ja"] = 0.99 + fertility_table[1]["gern"] = 0.99 + fertility_table[2]["räucherschinken"] = 0.999 + fertility_table[1][None] = 0.99 + + probabilities = { + "p1": 0.167, + "translation_table": translation_table, + "fertility_table": fertility_table, + "head_vacancy_table": head_vacancy_table, + "non_head_vacancy_table": 
non_head_vacancy_table, + "head_distortion_table": None, + "non_head_distortion_table": None, + "alignment_table": None, + } + + model5 = IBMModel5(corpus, 0, src_classes, trg_classes, probabilities) + + # act + probability = model5.prob_t_a_given_s(alignment_info) + + # assert + null_generation = 5 * pow(0.167, 1) * pow(0.833, 4) + fertility = 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 2 * 0.999 + lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98 + vacancy = 0.97 * 0.97 * 1 * 0.97 * 0.97 * 0.96 + expected_probability = ( + null_generation * fertility * lexical_translation * vacancy + ) + self.assertEqual(round(probability, 4), round(expected_probability, 4)) + + def test_prune(self): + # arrange + alignment_infos = [ + AlignmentInfo((1, 1), None, None, None), + AlignmentInfo((1, 2), None, None, None), + AlignmentInfo((2, 1), None, None, None), + AlignmentInfo((2, 2), None, None, None), + AlignmentInfo((0, 0), None, None, None), + ] + min_factor = IBMModel5.MIN_SCORE_FACTOR + best_score = 0.9 + scores = { + (1, 1): min(min_factor * 1.5, 1) * best_score, # above threshold + (1, 2): best_score, + (2, 1): min_factor * best_score, # at threshold + (2, 2): min_factor * best_score * 0.5, # low score + (0, 0): min(min_factor * 1.1, 1) * 1.2, # above threshold + } + corpus = [AlignedSent(["a"], ["b"])] + original_prob_function = IBMModel4.model4_prob_t_a_given_s + # mock static method + IBMModel4.model4_prob_t_a_given_s = staticmethod( + lambda a, model: scores[a.alignment] + ) + model5 = IBMModel5(corpus, 0, None, None) + + # act + pruned_alignments = model5.prune(alignment_infos) + + # assert + self.assertEqual(len(pruned_alignments), 3) + + # restore static method + IBMModel4.model4_prob_t_a_given_s = original_prob_function diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm_model.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm_model.py new file mode 100644 index 0000000000000000000000000000000000000000..2707fc6e8c8825c9e1c042cfcb28b3edacff3e87 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm_model.py @@ -0,0 +1,269 @@ +""" +Tests for common methods of IBM translation models +""" + +import unittest +from collections import defaultdict + +from nltk.translate import AlignedSent, IBMModel +from nltk.translate.ibm_model import AlignmentInfo + + +class TestIBMModel(unittest.TestCase): + __TEST_SRC_SENTENCE = ["j'", "aime", "bien", "jambon"] + __TEST_TRG_SENTENCE = ["i", "love", "ham"] + + def test_vocabularies_are_initialized(self): + parallel_corpora = [ + AlignedSent(["one", "two", "three", "four"], ["un", "deux", "trois"]), + AlignedSent(["five", "one", "six"], ["quatre", "cinq", "six"]), + AlignedSent([], ["sept"]), + ] + + ibm_model = IBMModel(parallel_corpora) + self.assertEqual(len(ibm_model.src_vocab), 8) + self.assertEqual(len(ibm_model.trg_vocab), 6) + + def test_vocabularies_are_initialized_even_with_empty_corpora(self): + parallel_corpora = [] + + ibm_model = IBMModel(parallel_corpora) + self.assertEqual(len(ibm_model.src_vocab), 1) # addition of NULL token + self.assertEqual(len(ibm_model.trg_vocab), 0) + + def test_best_model2_alignment(self): + # arrange + sentence_pair = AlignedSent( + TestIBMModel.__TEST_TRG_SENTENCE, TestIBMModel.__TEST_SRC_SENTENCE + ) + # None and 'bien' have zero fertility + translation_table = { + "i": {"j'": 0.9, "aime": 0.05, "bien": 0.02, "jambon": 0.03, None: 0}, + "love": {"j'": 0.05, "aime": 0.9, "bien": 0.01, "jambon": 0.01, 
None: 0.03}, + "ham": {"j'": 0, "aime": 0.01, "bien": 0, "jambon": 0.99, None: 0}, + } + alignment_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.2))) + ) + + ibm_model = IBMModel([]) + ibm_model.translation_table = translation_table + ibm_model.alignment_table = alignment_table + + # act + a_info = ibm_model.best_model2_alignment(sentence_pair) + + # assert + self.assertEqual(a_info.alignment[1:], (1, 2, 4)) # 0th element unused + self.assertEqual(a_info.cepts, [[], [1], [2], [], [3]]) + + def test_best_model2_alignment_does_not_change_pegged_alignment(self): + # arrange + sentence_pair = AlignedSent( + TestIBMModel.__TEST_TRG_SENTENCE, TestIBMModel.__TEST_SRC_SENTENCE + ) + translation_table = { + "i": {"j'": 0.9, "aime": 0.05, "bien": 0.02, "jambon": 0.03, None: 0}, + "love": {"j'": 0.05, "aime": 0.9, "bien": 0.01, "jambon": 0.01, None: 0.03}, + "ham": {"j'": 0, "aime": 0.01, "bien": 0, "jambon": 0.99, None: 0}, + } + alignment_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.2))) + ) + + ibm_model = IBMModel([]) + ibm_model.translation_table = translation_table + ibm_model.alignment_table = alignment_table + + # act: force 'love' to be pegged to 'jambon' + a_info = ibm_model.best_model2_alignment(sentence_pair, 2, 4) + # assert + self.assertEqual(a_info.alignment[1:], (1, 4, 4)) + self.assertEqual(a_info.cepts, [[], [1], [], [], [2, 3]]) + + def test_best_model2_alignment_handles_fertile_words(self): + # arrange + sentence_pair = AlignedSent( + ["i", "really", ",", "really", "love", "ham"], + TestIBMModel.__TEST_SRC_SENTENCE, + ) + # 'bien' produces 2 target words: 'really' and another 'really' + translation_table = { + "i": {"j'": 0.9, "aime": 0.05, "bien": 0.02, "jambon": 0.03, None: 0}, + "really": {"j'": 0, "aime": 0, "bien": 0.9, "jambon": 0.01, None: 0.09}, + ",": {"j'": 0, "aime": 0, "bien": 0.3, "jambon": 0, None: 0.7}, + "love": {"j'": 0.05, "aime": 0.9, "bien": 0.01, "jambon": 0.01, None: 0.03}, + "ham": {"j'": 0, "aime": 0.01, "bien": 0, "jambon": 0.99, None: 0}, + } + alignment_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.2))) + ) + + ibm_model = IBMModel([]) + ibm_model.translation_table = translation_table + ibm_model.alignment_table = alignment_table + + # act + a_info = ibm_model.best_model2_alignment(sentence_pair) + + # assert + self.assertEqual(a_info.alignment[1:], (1, 3, 0, 3, 2, 4)) + self.assertEqual(a_info.cepts, [[3], [1], [5], [2, 4], [6]]) + + def test_best_model2_alignment_handles_empty_src_sentence(self): + # arrange + sentence_pair = AlignedSent(TestIBMModel.__TEST_TRG_SENTENCE, []) + ibm_model = IBMModel([]) + + # act + a_info = ibm_model.best_model2_alignment(sentence_pair) + + # assert + self.assertEqual(a_info.alignment[1:], (0, 0, 0)) + self.assertEqual(a_info.cepts, [[1, 2, 3]]) + + def test_best_model2_alignment_handles_empty_trg_sentence(self): + # arrange + sentence_pair = AlignedSent([], TestIBMModel.__TEST_SRC_SENTENCE) + ibm_model = IBMModel([]) + + # act + a_info = ibm_model.best_model2_alignment(sentence_pair) + + # assert + self.assertEqual(a_info.alignment[1:], ()) + self.assertEqual(a_info.cepts, [[], [], [], [], []]) + + def test_neighboring_finds_neighbor_alignments(self): + # arrange + a_info = AlignmentInfo( + (0, 3, 2), + (None, "des", "œufs", "verts"), + ("UNUSED", "green", "eggs"), + [[], [], [2], [1]], + ) + ibm_model = IBMModel([]) + + # act + neighbors = ibm_model.neighboring(a_info) + + # 
assert + neighbor_alignments = set() + for neighbor in neighbors: + neighbor_alignments.add(neighbor.alignment) + expected_alignments = { + # moves + (0, 0, 2), + (0, 1, 2), + (0, 2, 2), + (0, 3, 0), + (0, 3, 1), + (0, 3, 3), + # swaps + (0, 2, 3), + # original alignment + (0, 3, 2), + } + self.assertEqual(neighbor_alignments, expected_alignments) + + def test_neighboring_sets_neighbor_alignment_info(self): + # arrange + a_info = AlignmentInfo( + (0, 3, 2), + (None, "des", "œufs", "verts"), + ("UNUSED", "green", "eggs"), + [[], [], [2], [1]], + ) + ibm_model = IBMModel([]) + + # act + neighbors = ibm_model.neighboring(a_info) + + # assert: select a few particular alignments + for neighbor in neighbors: + if neighbor.alignment == (0, 2, 2): + moved_alignment = neighbor + elif neighbor.alignment == (0, 3, 2): + swapped_alignment = neighbor + + self.assertEqual(moved_alignment.cepts, [[], [], [1, 2], []]) + self.assertEqual(swapped_alignment.cepts, [[], [], [2], [1]]) + + def test_neighboring_returns_neighbors_with_pegged_alignment(self): + # arrange + a_info = AlignmentInfo( + (0, 3, 2), + (None, "des", "œufs", "verts"), + ("UNUSED", "green", "eggs"), + [[], [], [2], [1]], + ) + ibm_model = IBMModel([]) + + # act: peg 'eggs' to align with 'œufs' + neighbors = ibm_model.neighboring(a_info, 2) + + # assert + neighbor_alignments = set() + for neighbor in neighbors: + neighbor_alignments.add(neighbor.alignment) + expected_alignments = { + # moves + (0, 0, 2), + (0, 1, 2), + (0, 2, 2), + # no swaps + # original alignment + (0, 3, 2), + } + self.assertEqual(neighbor_alignments, expected_alignments) + + def test_hillclimb(self): + # arrange + initial_alignment = AlignmentInfo((0, 3, 2), None, None, None) + + def neighboring_mock(a, j): + if a.alignment == (0, 3, 2): + return { + AlignmentInfo((0, 2, 2), None, None, None), + AlignmentInfo((0, 1, 1), None, None, None), + } + elif a.alignment == (0, 2, 2): + return { + AlignmentInfo((0, 3, 3), None, None, None), + AlignmentInfo((0, 4, 4), None, None, None), + } + return set() + + def prob_t_a_given_s_mock(a): + prob_values = { + (0, 3, 2): 0.5, + (0, 2, 2): 0.6, + (0, 1, 1): 0.4, + (0, 3, 3): 0.6, + (0, 4, 4): 0.7, + } + return prob_values.get(a.alignment, 0.01) + + ibm_model = IBMModel([]) + ibm_model.neighboring = neighboring_mock + ibm_model.prob_t_a_given_s = prob_t_a_given_s_mock + + # act + best_alignment = ibm_model.hillclimb(initial_alignment) + + # assert: hill climbing goes from (0, 3, 2) -> (0, 2, 2) -> (0, 4, 4) + self.assertEqual(best_alignment.alignment, (0, 4, 4)) + + def test_sample(self): + # arrange + sentence_pair = AlignedSent( + TestIBMModel.__TEST_TRG_SENTENCE, TestIBMModel.__TEST_SRC_SENTENCE + ) + ibm_model = IBMModel([]) + ibm_model.prob_t_a_given_s = lambda x: 0.001 + + # act + samples, best_alignment = ibm_model.sample(sentence_pair) + + # assert + self.assertEqual(len(samples), 61) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_nist.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_nist.py new file mode 100644 index 0000000000000000000000000000000000000000..1bb8829abaf40e892680f83b283bcb23884f386b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_nist.py @@ -0,0 +1,36 @@ +""" +Tests for NIST translation evaluation metric +""" + +import io +import unittest + +from nltk.data import find +from nltk.translate.nist_score import corpus_nist + + +class TestNIST(unittest.TestCase): + def test_sentence_nist(self): + ref_file = 
find("models/wmt15_eval/ref.ru") + hyp_file = find("models/wmt15_eval/google.ru") + mteval_output_file = find("models/wmt15_eval/mteval-13a.output") + + # Reads the NIST scores from the `mteval-13a.output` file. + # The order of the list corresponds to the order of the ngrams. + with open(mteval_output_file) as mteval_fin: + # The numbers are located in the last 4th line of the file. + # The first and 2nd item in the list are the score and system names. + mteval_nist_scores = map(float, mteval_fin.readlines()[-4].split()[1:-1]) + + with open(ref_file, encoding="utf8") as ref_fin: + with open(hyp_file, encoding="utf8") as hyp_fin: + # Whitespace tokenize the file. + # Note: split() automatically strip(). + hypotheses = list(map(lambda x: x.split(), hyp_fin)) + # Note that the corpus_bleu input is list of list of references. + references = list(map(lambda x: [x.split()], ref_fin)) + # Without smoothing. + for i, mteval_nist in zip(range(1, 10), mteval_nist_scores): + nltk_nist = corpus_nist(references, hypotheses, i) + # Check that the NIST scores difference is less than 0.5 + assert abs(mteval_nist - nltk_nist) < 0.05 diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_stack_decoder.py b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_stack_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..c3c3eb5de16fb522293c18fc07a16436f54a2940 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/test/unit/translate/test_stack_decoder.py @@ -0,0 +1,294 @@ +# Natural Language Toolkit: Stack decoder +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +Tests for stack decoder +""" + +import unittest +from collections import defaultdict +from math import log + +from nltk.translate import PhraseTable, StackDecoder +from nltk.translate.stack_decoder import _Hypothesis, _Stack + + +class TestStackDecoder(unittest.TestCase): + def test_find_all_src_phrases(self): + # arrange + phrase_table = TestStackDecoder.create_fake_phrase_table() + stack_decoder = StackDecoder(phrase_table, None) + sentence = ("my", "hovercraft", "is", "full", "of", "eels") + + # act + src_phrase_spans = stack_decoder.find_all_src_phrases(sentence) + + # assert + self.assertEqual(src_phrase_spans[0], [2]) # 'my hovercraft' + self.assertEqual(src_phrase_spans[1], [2]) # 'hovercraft' + self.assertEqual(src_phrase_spans[2], [3]) # 'is' + self.assertEqual(src_phrase_spans[3], [5, 6]) # 'full of', 'full of eels' + self.assertFalse(src_phrase_spans[4]) # no entry starting with 'of' + self.assertEqual(src_phrase_spans[5], [6]) # 'eels' + + def test_distortion_score(self): + # arrange + stack_decoder = StackDecoder(None, None) + stack_decoder.distortion_factor = 0.5 + hypothesis = _Hypothesis() + hypothesis.src_phrase_span = (3, 5) + + # act + score = stack_decoder.distortion_score(hypothesis, (8, 10)) + + # assert + expected_score = log(stack_decoder.distortion_factor) * (8 - 5) + self.assertEqual(score, expected_score) + + def test_distortion_score_of_first_expansion(self): + # arrange + stack_decoder = StackDecoder(None, None) + stack_decoder.distortion_factor = 0.5 + hypothesis = _Hypothesis() + + # act + score = stack_decoder.distortion_score(hypothesis, (8, 10)) + + # assert + # expansion from empty hypothesis always has zero distortion cost + self.assertEqual(score, 0.0) + + def test_compute_future_costs(self): + # arrange + phrase_table = 
TestStackDecoder.create_fake_phrase_table() + language_model = TestStackDecoder.create_fake_language_model() + stack_decoder = StackDecoder(phrase_table, language_model) + sentence = ("my", "hovercraft", "is", "full", "of", "eels") + + # act + future_scores = stack_decoder.compute_future_scores(sentence) + + # assert + self.assertEqual( + future_scores[1][2], + ( + phrase_table.translations_for(("hovercraft",))[0].log_prob + + language_model.probability(("hovercraft",)) + ), + ) + self.assertEqual( + future_scores[0][2], + ( + phrase_table.translations_for(("my", "hovercraft"))[0].log_prob + + language_model.probability(("my", "hovercraft")) + ), + ) + + def test_compute_future_costs_for_phrases_not_in_phrase_table(self): + # arrange + phrase_table = TestStackDecoder.create_fake_phrase_table() + language_model = TestStackDecoder.create_fake_language_model() + stack_decoder = StackDecoder(phrase_table, language_model) + sentence = ("my", "hovercraft", "is", "full", "of", "eels") + + # act + future_scores = stack_decoder.compute_future_scores(sentence) + + # assert + self.assertEqual( + future_scores[1][3], # 'hovercraft is' is not in phrase table + future_scores[1][2] + future_scores[2][3], + ) # backoff + + def test_future_score(self): + # arrange: sentence with 8 words; words 2, 3, 4 already translated + hypothesis = _Hypothesis() + hypothesis.untranslated_spans = lambda _: [(0, 2), (5, 8)] # mock + future_score_table = defaultdict(lambda: defaultdict(float)) + future_score_table[0][2] = 0.4 + future_score_table[5][8] = 0.5 + stack_decoder = StackDecoder(None, None) + + # act + future_score = stack_decoder.future_score(hypothesis, future_score_table, 8) + + # assert + self.assertEqual(future_score, 0.4 + 0.5) + + def test_valid_phrases(self): + # arrange + hypothesis = _Hypothesis() + # mock untranslated_spans method + hypothesis.untranslated_spans = lambda _: [(0, 2), (3, 6)] + all_phrases_from = [[1, 4], [2], [], [5], [5, 6, 7], [], [7]] + + # act + phrase_spans = StackDecoder.valid_phrases(all_phrases_from, hypothesis) + + # assert + self.assertEqual(phrase_spans, [(0, 1), (1, 2), (3, 5), (4, 5), (4, 6)]) + + @staticmethod + def create_fake_phrase_table(): + phrase_table = PhraseTable() + phrase_table.add(("hovercraft",), ("",), 0.8) + phrase_table.add(("my", "hovercraft"), ("", ""), 0.7) + phrase_table.add(("my", "cheese"), ("", ""), 0.7) + phrase_table.add(("is",), ("",), 0.8) + phrase_table.add(("is",), ("",), 0.5) + phrase_table.add(("full", "of"), ("", ""), 0.01) + phrase_table.add(("full", "of", "eels"), ("", "", ""), 0.5) + phrase_table.add(("full", "of", "spam"), ("", ""), 0.5) + phrase_table.add(("eels",), ("",), 0.5) + phrase_table.add(("spam",), ("",), 0.5) + return phrase_table + + @staticmethod + def create_fake_language_model(): + # nltk.model should be used here once it is implemented + language_prob = defaultdict(lambda: -999.0) + language_prob[("my",)] = log(0.1) + language_prob[("hovercraft",)] = log(0.1) + language_prob[("is",)] = log(0.1) + language_prob[("full",)] = log(0.1) + language_prob[("of",)] = log(0.1) + language_prob[("eels",)] = log(0.1) + language_prob[("my", "hovercraft")] = log(0.3) + language_model = type( + "", (object,), {"probability": lambda _, phrase: language_prob[phrase]} + )() + return language_model + + +class TestHypothesis(unittest.TestCase): + def setUp(self): + root = _Hypothesis() + child = _Hypothesis( + raw_score=0.5, + src_phrase_span=(3, 7), + trg_phrase=("hello", "world"), + previous=root, + ) + grandchild = _Hypothesis( + 
raw_score=0.4, + src_phrase_span=(1, 2), + trg_phrase=("and", "goodbye"), + previous=child, + ) + self.hypothesis_chain = grandchild + + def test_translation_so_far(self): + # act + translation = self.hypothesis_chain.translation_so_far() + + # assert + self.assertEqual(translation, ["hello", "world", "and", "goodbye"]) + + def test_translation_so_far_for_empty_hypothesis(self): + # arrange + hypothesis = _Hypothesis() + + # act + translation = hypothesis.translation_so_far() + + # assert + self.assertEqual(translation, []) + + def test_total_translated_words(self): + # act + total_translated_words = self.hypothesis_chain.total_translated_words() + + # assert + self.assertEqual(total_translated_words, 5) + + def test_translated_positions(self): + # act + translated_positions = self.hypothesis_chain.translated_positions() + + # assert + translated_positions.sort() + self.assertEqual(translated_positions, [1, 3, 4, 5, 6]) + + def test_untranslated_spans(self): + # act + untranslated_spans = self.hypothesis_chain.untranslated_spans(10) + + # assert + self.assertEqual(untranslated_spans, [(0, 1), (2, 3), (7, 10)]) + + def test_untranslated_spans_for_empty_hypothesis(self): + # arrange + hypothesis = _Hypothesis() + + # act + untranslated_spans = hypothesis.untranslated_spans(10) + + # assert + self.assertEqual(untranslated_spans, [(0, 10)]) + + +class TestStack(unittest.TestCase): + def test_push_bumps_off_worst_hypothesis_when_stack_is_full(self): + # arrange + stack = _Stack(3) + poor_hypothesis = _Hypothesis(0.01) + + # act + stack.push(_Hypothesis(0.2)) + stack.push(poor_hypothesis) + stack.push(_Hypothesis(0.1)) + stack.push(_Hypothesis(0.3)) + + # assert + self.assertFalse(poor_hypothesis in stack) + + def test_push_removes_hypotheses_that_fall_below_beam_threshold(self): + # arrange + stack = _Stack(3, 0.5) + poor_hypothesis = _Hypothesis(0.01) + worse_hypothesis = _Hypothesis(0.009) + + # act + stack.push(poor_hypothesis) + stack.push(worse_hypothesis) + stack.push(_Hypothesis(0.9)) # greatly superior hypothesis + + # assert + self.assertFalse(poor_hypothesis in stack) + self.assertFalse(worse_hypothesis in stack) + + def test_push_does_not_add_hypothesis_that_falls_below_beam_threshold(self): + # arrange + stack = _Stack(3, 0.5) + poor_hypothesis = _Hypothesis(0.01) + + # act + stack.push(_Hypothesis(0.9)) # greatly superior hypothesis + stack.push(poor_hypothesis) + + # assert + self.assertFalse(poor_hypothesis in stack) + + def test_best_returns_the_best_hypothesis(self): + # arrange + stack = _Stack(3) + best_hypothesis = _Hypothesis(0.99) + + # act + stack.push(_Hypothesis(0.0)) + stack.push(best_hypothesis) + stack.push(_Hypothesis(0.5)) + + # assert + self.assertEqual(stack.best(), best_hypothesis) + + def test_best_returns_none_when_stack_is_empty(self): + # arrange + stack = _Stack(3) + + # assert + self.assertEqual(stack.best(), None)
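
Taken together, the suites above exercise a handful of public entry points. For reference, here is a minimal sketch of the nltk.tgrep API that test_tgrep.py drives; the tree and patterns are illustrative, and the only assumption is an importable nltk. tgrep_positions lazily yields one list of matching tree positions per input tree, which is why the tests wrap each call in list().

from nltk import tgrep
from nltk.tree import ParentedTree

# tgrep operates on ParentedTree rather than plain Tree, because link
# predicates (parent, sister, precedence) need upward pointers.
tree = ParentedTree.fromstring("(S (NP (DT the) (NN dog)) (VP (VBD barked)))")

# A bare node label matches every node carrying that label.
print(list(tgrep.tgrep_positions("NN", [tree])))       # [[(0, 1)]]
# "A < B": A immediately dominates B.
print(list(tgrep.tgrep_positions("NP < DT", [tree])))  # [[(0,)]]
# tgrep_nodes returns the matching subtrees themselves instead of positions.
print(list(tgrep.tgrep_nodes("VP", [tree])))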
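
Similarly, a small sketch of nltk.util.everygrams, whose padding behaviour the test_util.py expectations encode: padding inserts None symbols (the default pad symbol) before n-gram extraction, which is where the (None,) tuples in the expected lists come from. The token list here is illustrative.

from nltk.util import everygrams

tokens = ["a", "b", "c"]
# All contiguous n-grams with 1 <= n <= max_len, grouped by start position.
print(list(everygrams(tokens, max_len=2)))
# [('a',), ('a', 'b'), ('b',), ('b', 'c'), ('c',)]

# pad_right appends max_len - 1 padding symbols (None by default), so extra
# n-grams containing None appear at the right edge of the sequence.
print(list(everygrams(tokens, max_len=2, pad_right=True)))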
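
The BLEU tests likewise reduce to a few library calls. A hedged sketch of typical usage follows, with sentences borrowed from the paper example in test_modified_precision; the printed scores are not asserted here.

from nltk.translate.bleu_score import SmoothingFunction, corpus_bleu, sentence_bleu

reference = (
    "It is a guide to action that ensures that the military "
    "will forever heed Party commands"
).split()
hypothesis = (
    "It is a guide to action which ensures that the military "
    "always obeys the commands of the party"
).split()

# Default weights are uniform over 1- to 4-grams: (0.25, 0.25, 0.25, 0.25).
print(sentence_bleu([reference], hypothesis))

# Smoothing avoids the hard zero when some n-gram order has no matches,
# the behaviour TestBLEUFringeCases pins down.
chencherry = SmoothingFunction()
print(sentence_bleu([reference], hypothesis, smoothing_function=chencherry.method3))

# corpus_bleu takes one list of references per hypothesis.
print(corpus_bleu([[reference]], [hypothesis]))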
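
Finally, a sketch of how the IBM model classes tested above are trained outside of unit tests, under the conventions the tests establish: AlignedSent takes the target-language sentence first (see test_ibm2.py), and the second IBMModel2 argument is the number of EM iterations (the tests pass 0 to skip training). The tiny bitext is illustrative only.

from nltk.translate import AlignedSent, IBMModel2

bitext = [
    AlignedSent(["the", "house", "is", "small"], ["klein", "ist", "das", "haus"]),
    AlignedSent(["the", "house", "is", "big"], ["das", "haus", "ist", "gross"]),
    AlignedSent(["the", "book"], ["das", "buch"]),
]

# Run 5 iterations of EM to estimate translation and alignment probabilities.
ibm2 = IBMModel2(bitext, 5)

# Lexical translation probability, indexed as translation_table[target][source].
print(ibm2.translation_table["house"]["haus"])
# Training also attaches a word alignment to each sentence pair.
print(bitext[0].alignment)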