diff --git a/ckpts/universal/global_step40/zero/18.attention.dense.weight/fp32.pt b/ckpts/universal/global_step40/zero/18.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..9ce1382b94d1568941f1343c4b0f7465deaaa32f --- /dev/null +++ b/ckpts/universal/global_step40/zero/18.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93482633809008696ac13147754df15af63147730ef48b2e800c1fefc6d00f08 +size 16778317 diff --git a/ckpts/universal/global_step40/zero/4.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/4.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..159d87d655986230fbcfd03be46eacf6ee561d1f --- /dev/null +++ b/ckpts/universal/global_step40/zero/4.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef05a5b41568f454cb8d51db527fa4cf539c17f2f8392ece5400565f3592d63e +size 9372 diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffc6366a0861bf089dc743a3c2aaea22cc7e42b6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/book.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/book.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a97ae89e5ba9389a4b40ab43d6724a046417b75f Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/book.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/cli.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b65cfd01e36a6e240e5e773c936a0e76f966934a Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/cli.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/collections.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/collections.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..161f1c53ce8e7d9b42590f78a7fc8435faa4fb5a Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/collections.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/collocations.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/collocations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38d2e085a574513c9347f14dd93f010e773e5234 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/collocations.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/data.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b0aef9957d5eb7a14655c6f2ebc612d173a54b0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/data.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/downloader.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/downloader.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..df1872e223c9e590038c82643ffb055b3f083844 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/downloader.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/featstruct.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/featstruct.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31791847c3c070731fec59e759f2945ca6942a47 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/featstruct.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/grammar.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/grammar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..507cfe94ed115c585350598b162dfff7ef795f9f Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/grammar.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/help.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/help.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf275934da9debe75d74138642a503db3daee1f1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/help.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/internals.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/internals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16d6470d3573bdce433d42201e61a6a060eaaee2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/internals.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/jsontags.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/jsontags.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..576c0aee0f1efbf1b1f1b96d8dcd1998fb223a56 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/jsontags.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/lazyimport.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/lazyimport.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d29545cec5f6472f3b17c4dc9c0a1c13e3dd2c00 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/lazyimport.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/probability.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/probability.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e10b0a3cc6b08827f33f9a7c83023122982062a2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/probability.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/text.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a340022cc1a2436afeee4e5d1fb4b85274f1e82 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/text.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/toolbox.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/toolbox.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..cecd27d87e1d01119680ea8995529c6e6eeda790 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/toolbox.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/treeprettyprinter.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/treeprettyprinter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d647302cf602b0a531c839aa942aebe66c151a8b Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/treeprettyprinter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e75a8baa2330e001ed31afe9f9ae2784ded45173 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/wsd.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/wsd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41873159c19d8f36ffac0a193202f3d4e3f108aa Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/wsd.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31a0f11643163791f57ca2e33b8ad9d8bc8e4076 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..295073408e0f1455dbfff0c973a1a70124481eed Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/decisiontree.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/decisiontree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d443477983a7fdb01c8ea76b7fceedb9c99214b0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/decisiontree.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/maxent.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/maxent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65174ed7a1c3576fb8d3fb71843b2bc44e857a39 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/maxent.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/megam.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/megam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed675ca875ac5a6c8116a93d4f8c313984de3645 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/megam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/naivebayes.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/naivebayes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bf6c69c10b85c63dd93a448e05464f9baac8f9c Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/naivebayes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/positivenaivebayes.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/positivenaivebayes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11318562a6704f20e1ec8e5afbd6e3e361d6129b Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/positivenaivebayes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/rte_classify.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/rte_classify.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c45041e86bb2c81ddfaada64cfd7396bbf230f05 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/rte_classify.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/scikitlearn.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/scikitlearn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ec0a2ea806d4c09256dc4453a22f082ef9dd07a Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/scikitlearn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/senna.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/senna.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8f035c4d019436de1ce71b3878eddf9850f0936 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/senna.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/svm.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/svm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40f6973e04e6c941431ada79b1a18f0277075870 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/svm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/tadm.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/tadm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bea92c55c763549eb34648d37b716d1d74614e76 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/tadm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/textcat.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/textcat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7350692286295e28923f1e963555d358d4e04769 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/textcat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..436716c8c3b36804ba43d60e98ba9f1240e9d41b Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/weka.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/weka.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9021ea440913507b234deadff988e209cb90065c Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/classify/__pycache__/weka.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/__init__.py b/venv/lib/python3.10/site-packages/nltk/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ada17ef29e19763f8bc42d103436e7fa72d3cfd0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/metrics/__init__.py @@ -0,0 +1,51 @@ +# Natural Language Toolkit: Metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT +# + +""" +NLTK Metrics + +Classes and methods for scoring processing modules. +""" + +from nltk.metrics.agreement import AnnotationTask +from nltk.metrics.aline import align +from nltk.metrics.association import ( + BigramAssocMeasures, + ContingencyMeasures, + NgramAssocMeasures, + QuadgramAssocMeasures, + TrigramAssocMeasures, +) +from nltk.metrics.confusionmatrix import ConfusionMatrix +from nltk.metrics.distance import ( + binary_distance, + custom_distance, + edit_distance, + edit_distance_align, + fractional_presence, + interval_distance, + jaccard_distance, + masi_distance, + presence, +) +from nltk.metrics.paice import Paice +from nltk.metrics.scores import ( + accuracy, + approxrand, + f_measure, + log_likelihood, + precision, + recall, +) +from nltk.metrics.segmentation import ghd, pk, windowdiff +from nltk.metrics.spearman import ( + ranks_from_scores, + ranks_from_sequence, + spearman_correlation, +) diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..076a581c87ee14bbfd435ae7432843ccc89a887a Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/agreement.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/agreement.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1c639fffbff143d0dc5c6a0171ab6b7933c0269 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/agreement.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/aline.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/aline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4025294b08e555ff92de23b7dc28ed74bf42bff Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/aline.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/association.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/association.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6bb3468bf0fb94a4344f153bb75ea011a539b9a Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/association.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/confusionmatrix.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/confusionmatrix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9008424e0ab67c007955a2c625cf0b5bd0bf58b7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/confusionmatrix.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/distance.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/distance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3204ace2475169ddb6ad8d911edba0c8fc066ab5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/distance.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/paice.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/paice.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35620d14beaf25d02319bf1ab782182904edd27e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/paice.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/scores.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/scores.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d5037c1703eb602d4396517f045f3c243dda07d Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/scores.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/segmentation.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/segmentation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bbc04ab42d094c401abac627e1b909d3fbedf02 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/segmentation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/spearman.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/spearman.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da075ff2768216d323412aacb2c3310bc9eeb935 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/metrics/__pycache__/spearman.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/agreement.py b/venv/lib/python3.10/site-packages/nltk/metrics/agreement.py new file mode 100644 index 0000000000000000000000000000000000000000..69b1a39fe2017df3beef39fcdd57b9a73c6ac0f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/metrics/agreement.py @@ -0,0 +1,465 @@ +# Natural Language Toolkit: Agreement Metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tom Lippincott +# URL: +# For license information, see LICENSE.TXT +# + +""" +Implementations of inter-annotator agreement coefficients surveyed by Artstein +and Poesio (2007), Inter-Coder Agreement for Computational Linguistics. + +An agreement coefficient calculates the amount that annotators agreed on label +assignments beyond what is expected by chance. + +In defining the AnnotationTask class, we use naming conventions similar to the +paper's terminology. 
There are three types of objects in an annotation task: + + the coders (variables "c" and "C") + the items to be annotated (variables "i" and "I") + the potential categories to be assigned (variables "k" and "K") + +Additionally, it is often the case that we don't want to treat two different +labels as complete disagreement, and so the AnnotationTask constructor can also +take a distance metric as a final argument. Distance metrics are simply +functions that take two arguments, and return a value between 0.0 and 1.0 +indicating the distance between them. If not supplied, the default is binary +comparison between the arguments. + +The simplest way to initialize an AnnotationTask is with a list of triples, +each containing a coder's assignment for one object in the task: + + task = AnnotationTask(data=[('c1', '1', 'v1'),('c2', '1', 'v1'),...]) + +Note that the data list needs to contain the same number of triples for each +individual coder, containing category values for the same set of items. + +Alpha (Krippendorff 1980) +Kappa (Cohen 1960) +S (Bennet, Albert and Goldstein 1954) +Pi (Scott 1955) + + +TODO: Describe handling of multiple coders and missing data + +Expected results from the Artstein and Poesio survey paper: + + >>> from nltk.metrics.agreement import AnnotationTask + >>> import os.path + >>> t = AnnotationTask(data=[x.split() for x in open(os.path.join(os.path.dirname(__file__), "artstein_poesio_example.txt"))]) + >>> t.avg_Ao() + 0.88 + >>> round(t.pi(), 5) + 0.79953 + >>> round(t.S(), 2) + 0.82 + + This would have returned a wrong value (0.0) in @785fb79 as coders are in + the wrong order. Subsequently, all values for pi(), S(), and kappa() would + have been wrong as they are computed with avg_Ao(). + >>> t2 = AnnotationTask(data=[('b','1','stat'),('a','1','stat')]) + >>> t2.avg_Ao() + 1.0 + + The following, of course, also works. + >>> t3 = AnnotationTask(data=[('a','1','othr'),('b','1','othr')]) + >>> t3.avg_Ao() + 1.0 + +""" + +import logging +from itertools import groupby +from operator import itemgetter + +from nltk.internals import deprecated +from nltk.metrics.distance import binary_distance +from nltk.probability import ConditionalFreqDist, FreqDist + +log = logging.getLogger(__name__) + + +class AnnotationTask: + """Represents an annotation task, i.e. people assign labels to items. + + Notation tries to match notation in Artstein and Poesio (2007). + + In general, coders and items can be represented as any hashable object. + Integers, for example, are fine, though strings are more readable. + Labels must support the distance functions applied to them, so e.g. + a string-edit-distance makes no sense if your labels are integers, + whereas interval distance needs numeric values. A notable case of this + is the MASI metric, which requires Python sets. + """ + + def __init__(self, data=None, distance=binary_distance): + """Initialize an annotation task. + + The data argument can be None (to create an empty annotation task) or a sequence of 3-tuples, + each representing a coder's labeling of an item: + ``(coder,item,label)`` + + The distance argument is a function taking two arguments (labels) and producing a numerical distance. 
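Because load_array stores labels as-is, set-valued labels work as long as the chosen distance understands them; a minimal illustrative sketch (editorial, not part of the patch, with made-up coder and item names) using the set-based masi_distance that nltk.metrics.distance exports in this same diff:

    >>> from nltk.metrics.agreement import AnnotationTask
    >>> from nltk.metrics.distance import masi_distance
    >>> data = [('c1', 'i1', frozenset(['stat'])),
    ...         ('c2', 'i1', frozenset(['stat', 'othr']))]
    >>> AnnotationTask(data=data, distance=masi_distance).avg_Ao()  # doctest: +SKIP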
+ The distance from a label to itself should be zero: + ``distance(l,l) = 0`` + """ + self.distance = distance + self.I = set() + self.K = set() + self.C = set() + self.data = [] + if data is not None: + self.load_array(data) + + def __str__(self): + return "\r\n".join( + map( + lambda x: "%s\t%s\t%s" + % (x["coder"], x["item"].replace("_", "\t"), ",".join(x["labels"])), + self.data, + ) + ) + + def load_array(self, array): + """Load an sequence of annotation results, appending to any data already loaded. + + The argument is a sequence of 3-tuples, each representing a coder's labeling of an item: + (coder,item,label) + """ + for coder, item, labels in array: + self.C.add(coder) + self.K.add(labels) + self.I.add(item) + self.data.append({"coder": coder, "labels": labels, "item": item}) + + def agr(self, cA, cB, i, data=None): + """Agreement between two coders on a given item""" + data = data or self.data + # cfedermann: we don't know what combination of coder/item will come + # first in x; to avoid StopIteration problems due to assuming an order + # cA,cB, we allow either for k1 and then look up the missing as k2. + k1 = next(x for x in data if x["coder"] in (cA, cB) and x["item"] == i) + if k1["coder"] == cA: + k2 = next(x for x in data if x["coder"] == cB and x["item"] == i) + else: + k2 = next(x for x in data if x["coder"] == cA and x["item"] == i) + + ret = 1.0 - float(self.distance(k1["labels"], k2["labels"])) + log.debug("Observed agreement between %s and %s on %s: %f", cA, cB, i, ret) + log.debug( + 'Distance between "%r" and "%r": %f', k1["labels"], k2["labels"], 1.0 - ret + ) + return ret + + def Nk(self, k): + return float(sum(1 for x in self.data if x["labels"] == k)) + + def Nik(self, i, k): + return float(sum(1 for x in self.data if x["item"] == i and x["labels"] == k)) + + def Nck(self, c, k): + return float(sum(1 for x in self.data if x["coder"] == c and x["labels"] == k)) + + @deprecated("Use Nk, Nik or Nck instead") + def N(self, k=None, i=None, c=None): + """Implements the "n-notation" used in Artstein and Poesio (2007)""" + if k is not None and i is None and c is None: + ret = self.Nk(k) + elif k is not None and i is not None and c is None: + ret = self.Nik(i, k) + elif k is not None and c is not None and i is None: + ret = self.Nck(c, k) + else: + raise ValueError( + f"You must pass either i or c, not both! 
(k={k!r},i={i!r},c={c!r})" + ) + log.debug("Count on N[%s,%s,%s]: %d", k, i, c, ret) + return ret + + def _grouped_data(self, field, data=None): + data = data or self.data + return groupby(sorted(data, key=itemgetter(field)), itemgetter(field)) + + def Ao(self, cA, cB): + """Observed agreement between two coders on all items.""" + data = self._grouped_data( + "item", (x for x in self.data if x["coder"] in (cA, cB)) + ) + ret = sum(self.agr(cA, cB, item, item_data) for item, item_data in data) / len( + self.I + ) + log.debug("Observed agreement between %s and %s: %f", cA, cB, ret) + return ret + + def _pairwise_average(self, function): + """ + Calculates the average of function results for each coder pair + """ + total = 0 + n = 0 + s = self.C.copy() + for cA in self.C: + s.remove(cA) + for cB in s: + total += function(cA, cB) + n += 1 + ret = total / n + return ret + + def avg_Ao(self): + """Average observed agreement across all coders and items.""" + ret = self._pairwise_average(self.Ao) + log.debug("Average observed agreement: %f", ret) + return ret + + def Do_Kw_pairwise(self, cA, cB, max_distance=1.0): + """The observed disagreement for the weighted kappa coefficient.""" + total = 0.0 + data = (x for x in self.data if x["coder"] in (cA, cB)) + for i, itemdata in self._grouped_data("item", data): + # we should have two items; distance doesn't care which comes first + total += self.distance(next(itemdata)["labels"], next(itemdata)["labels"]) + + ret = total / (len(self.I) * max_distance) + log.debug("Observed disagreement between %s and %s: %f", cA, cB, ret) + return ret + + def Do_Kw(self, max_distance=1.0): + """Averaged over all labelers""" + ret = self._pairwise_average( + lambda cA, cB: self.Do_Kw_pairwise(cA, cB, max_distance) + ) + log.debug("Observed disagreement: %f", ret) + return ret + + # Agreement Coefficients + def S(self): + """Bennett, Albert and Goldstein 1954""" + Ae = 1.0 / len(self.K) + ret = (self.avg_Ao() - Ae) / (1.0 - Ae) + return ret + + def pi(self): + """Scott 1955; here, multi-pi. + Equivalent to K from Siegel and Castellan (1988). + + """ + total = 0.0 + label_freqs = FreqDist(x["labels"] for x in self.data) + for k, f in label_freqs.items(): + total += f**2 + Ae = total / ((len(self.I) * len(self.C)) ** 2) + return (self.avg_Ao() - Ae) / (1 - Ae) + + def Ae_kappa(self, cA, cB): + Ae = 0.0 + nitems = float(len(self.I)) + label_freqs = ConditionalFreqDist((x["labels"], x["coder"]) for x in self.data) + for k in label_freqs.conditions(): + Ae += (label_freqs[k][cA] / nitems) * (label_freqs[k][cB] / nitems) + return Ae + + def kappa_pairwise(self, cA, cB): + """ """ + Ae = self.Ae_kappa(cA, cB) + ret = (self.Ao(cA, cB) - Ae) / (1.0 - Ae) + log.debug("Expected agreement between %s and %s: %f", cA, cB, Ae) + return ret + + def kappa(self): + """Cohen 1960 + Averages naively over kappas for each coder pair. + + """ + return self._pairwise_average(self.kappa_pairwise) + + def multi_kappa(self): + """Davies and Fleiss 1982 + Averages over observed and expected agreements for each coder pair. 
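As a rough usage sketch (illustrative toy data, not part of the patch), the pairwise and multi-coder coefficients defined in this class can be compared side by side on the same task:

    >>> from nltk.metrics.agreement import AnnotationTask
    >>> t = AnnotationTask(data=[
    ...     ('c1', '1', 'v1'), ('c2', '1', 'v1'), ('c3', '1', 'v2'),
    ...     ('c1', '2', 'v1'), ('c2', '2', 'v2'), ('c3', '2', 'v2')])
    >>> t.kappa()        # doctest: +SKIP
    >>> t.multi_kappa()  # doctest: +SKIP
    >>> t.alpha()        # doctest: +SKIP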
+ + """ + Ae = self._pairwise_average(self.Ae_kappa) + return (self.avg_Ao() - Ae) / (1.0 - Ae) + + def Disagreement(self, label_freqs): + total_labels = sum(label_freqs.values()) + pairs = 0.0 + for j, nj in label_freqs.items(): + for l, nl in label_freqs.items(): + pairs += float(nj * nl) * self.distance(l, j) + return 1.0 * pairs / (total_labels * (total_labels - 1)) + + def alpha(self): + """Krippendorff 1980""" + # check for degenerate cases + if len(self.K) == 0: + raise ValueError("Cannot calculate alpha, no data present!") + if len(self.K) == 1: + log.debug("Only one annotation value, alpha returning 1.") + return 1 + if len(self.C) == 1 and len(self.I) == 1: + raise ValueError("Cannot calculate alpha, only one coder and item present!") + + total_disagreement = 0.0 + total_ratings = 0 + all_valid_labels_freq = FreqDist([]) + + total_do = 0.0 # Total observed disagreement for all items. + for i, itemdata in self._grouped_data("item"): + label_freqs = FreqDist(x["labels"] for x in itemdata) + labels_count = sum(label_freqs.values()) + if labels_count < 2: + # Ignore the item. + continue + all_valid_labels_freq += label_freqs + total_do += self.Disagreement(label_freqs) * labels_count + + do = total_do / sum(all_valid_labels_freq.values()) + + de = self.Disagreement(all_valid_labels_freq) # Expected disagreement. + k_alpha = 1.0 - do / de + + return k_alpha + + def weighted_kappa_pairwise(self, cA, cB, max_distance=1.0): + """Cohen 1968""" + total = 0.0 + label_freqs = ConditionalFreqDist( + (x["coder"], x["labels"]) for x in self.data if x["coder"] in (cA, cB) + ) + for j in self.K: + for l in self.K: + total += label_freqs[cA][j] * label_freqs[cB][l] * self.distance(j, l) + De = total / (max_distance * pow(len(self.I), 2)) + log.debug("Expected disagreement between %s and %s: %f", cA, cB, De) + Do = self.Do_Kw_pairwise(cA, cB) + ret = 1.0 - (Do / De) + return ret + + def weighted_kappa(self, max_distance=1.0): + """Cohen 1968""" + return self._pairwise_average( + lambda cA, cB: self.weighted_kappa_pairwise(cA, cB, max_distance) + ) + + +if __name__ == "__main__": + + import optparse + import re + + from nltk.metrics import distance + + # process command-line arguments + parser = optparse.OptionParser() + parser.add_option( + "-d", + "--distance", + dest="distance", + default="binary_distance", + help="distance metric to use", + ) + parser.add_option( + "-a", + "--agreement", + dest="agreement", + default="kappa", + help="agreement coefficient to calculate", + ) + parser.add_option( + "-e", + "--exclude", + dest="exclude", + action="append", + default=[], + help="coder names to exclude (may be specified multiple times)", + ) + parser.add_option( + "-i", + "--include", + dest="include", + action="append", + default=[], + help="coder names to include, same format as exclude", + ) + parser.add_option( + "-f", + "--file", + dest="file", + help="file to read labelings from, each line with three columns: 'labeler item labels'", + ) + parser.add_option( + "-v", + "--verbose", + dest="verbose", + default="0", + help="how much debugging to print on stderr (0-4)", + ) + parser.add_option( + "-c", + "--columnsep", + dest="columnsep", + default="\t", + help="char/string that separates the three columns in the file, defaults to tab", + ) + parser.add_option( + "-l", + "--labelsep", + dest="labelsep", + default=",", + help="char/string that separates labels (if labelers can assign more than one), defaults to comma", + ) + parser.add_option( + "-p", + "--presence", + dest="presence", + 
default=None, + help="convert each labeling into 1 or 0, based on presence of LABEL", + ) + parser.add_option( + "-T", + "--thorough", + dest="thorough", + default=False, + action="store_true", + help="calculate agreement for every subset of the annotators", + ) + (options, remainder) = parser.parse_args() + + if not options.file: + parser.print_help() + exit() + + logging.basicConfig(level=50 - 10 * int(options.verbose)) + + # read in data from the specified file + data = [] + with open(options.file) as infile: + for l in infile: + toks = l.split(options.columnsep) + coder, object_, labels = ( + toks[0], + str(toks[1:-1]), + frozenset(toks[-1].strip().split(options.labelsep)), + ) + if ( + (options.include == options.exclude) + or (len(options.include) > 0 and coder in options.include) + or (len(options.exclude) > 0 and coder not in options.exclude) + ): + data.append((coder, object_, labels)) + + if options.presence: + task = AnnotationTask( + data, getattr(distance, options.distance)(options.presence) + ) + else: + task = AnnotationTask(data, getattr(distance, options.distance)) + + if options.thorough: + pass + else: + print(getattr(task, options.agreement)()) + + logging.shutdown() diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/aline.py b/venv/lib/python3.10/site-packages/nltk/metrics/aline.py new file mode 100644 index 0000000000000000000000000000000000000000..5bf8d9930228b2bba3d07b5c92201a011bb9ca25 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/metrics/aline.py @@ -0,0 +1,1354 @@ +# Natural Language Toolkit: ALINE +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Greg Kondrak +# Geoff Bacon (Python port) +# URL: +# For license information, see LICENSE.TXT + +""" +ALINE +https://webdocs.cs.ualberta.ca/~kondrak/ +Copyright 2002 by Grzegorz Kondrak. + +ALINE is an algorithm for aligning phonetic sequences, described in [1]. +This module is a port of Kondrak's (2002) ALINE. It provides functions for +phonetic sequence alignment and similarity analysis. These are useful in +historical linguistics, sociolinguistics and synchronic phonology. + +ALINE has parameters that can be tuned for desired output. These parameters are: +- C_skip, C_sub, C_exp, C_vwl +- Salience weights +- Segmental features + +In this implementation, some parameters have been changed from their default +values as described in [1], in order to replicate published results. All changes +are noted in comments. + +Example usage +------------- + +# Get optimal alignment of two phonetic sequences + +>>> align('θin', 'tenwis') # doctest: +SKIP +[[('θ', 't'), ('i', 'e'), ('n', 'n'), ('-', 'w'), ('-', 'i'), ('-', 's')]] + +[1] G. Kondrak. Algorithms for Language Reconstruction. PhD dissertation, +University of Toronto. 
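Beyond the default behaviour shown in the example above, align also accepts an epsilon argument: the threshold for keeping an alignment is (1 - epsilon) times the best score, so a nonzero epsilon can return several near-optimal alignments rather than only the best one. A hedged sketch (editorial, not part of the patch, assuming numpy is available), reusing a word pair from the cognate data in this module:

    >>> align('kabeθa', 'kap', epsilon=0.2)  # doctest: +SKIP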
+""" + +try: + import numpy as np +except ImportError: + np = None + +# === Constants === + +inf = float("inf") + +# Default values for maximum similarity scores (Kondrak 2002: 54) +C_skip = -10 # Indels +C_sub = 35 # Substitutions +C_exp = 45 # Expansions/compressions +C_vwl = 5 # Vowel/consonant relative weight (decreased from 10) + +consonants = [ + "B", + "N", + "R", + "b", + "c", + "d", + "f", + "g", + "h", + "j", + "k", + "l", + "m", + "n", + "p", + "q", + "r", + "s", + "t", + "v", + "x", + "z", + "ç", + "ð", + "ħ", + "ŋ", + "ɖ", + "ɟ", + "ɢ", + "ɣ", + "ɦ", + "ɬ", + "ɮ", + "ɰ", + "ɱ", + "ɲ", + "ɳ", + "ɴ", + "ɸ", + "ɹ", + "ɻ", + "ɽ", + "ɾ", + "ʀ", + "ʁ", + "ʂ", + "ʃ", + "ʈ", + "ʋ", + "ʐ ", + "ʒ", + "ʔ", + "ʕ", + "ʙ", + "ʝ", + "β", + "θ", + "χ", + "ʐ", + "w", +] + +# Relevant features for comparing consonants and vowels +R_c = [ + "aspirated", + "lateral", + "manner", + "nasal", + "place", + "retroflex", + "syllabic", + "voice", +] +# 'high' taken out of R_v because same as manner +R_v = [ + "back", + "lateral", + "long", + "manner", + "nasal", + "place", + "retroflex", + "round", + "syllabic", + "voice", +] + +# Flattened feature matrix (Kondrak 2002: 56) +similarity_matrix = { + # place + "bilabial": 1.0, + "labiodental": 0.95, + "dental": 0.9, + "alveolar": 0.85, + "retroflex": 0.8, + "palato-alveolar": 0.75, + "palatal": 0.7, + "velar": 0.6, + "uvular": 0.5, + "pharyngeal": 0.3, + "glottal": 0.1, + "labiovelar": 1.0, + "vowel": -1.0, # added 'vowel' + # manner + "stop": 1.0, + "affricate": 0.9, + "fricative": 0.85, # increased fricative from 0.8 + "trill": 0.7, + "tap": 0.65, + "approximant": 0.6, + "high vowel": 0.4, + "mid vowel": 0.2, + "low vowel": 0.0, + "vowel2": 0.5, # added vowel + # high + "high": 1.0, + "mid": 0.5, + "low": 0.0, + # back + "front": 1.0, + "central": 0.5, + "back": 0.0, + # binary features + "plus": 1.0, + "minus": 0.0, +} + +# Relative weights of phonetic features (Kondrak 2002: 55) +salience = { + "syllabic": 5, + "place": 40, + "manner": 50, + "voice": 5, # decreased from 10 + "nasal": 20, # increased from 10 + "retroflex": 10, + "lateral": 10, + "aspirated": 5, + "long": 0, # decreased from 1 + "high": 3, # decreased from 5 + "back": 2, # decreased from 5 + "round": 2, # decreased from 5 +} + +# (Kondrak 2002: 59-60) +feature_matrix = { + # Consonants + "p": { + "place": "bilabial", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "b": { + "place": "bilabial", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "t": { + "place": "alveolar", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "d": { + "place": "alveolar", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʈ": { + "place": "retroflex", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɖ": { + "place": "retroflex", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "c": { + "place": "palatal", + "manner": "stop", + "syllabic": "minus", + "voice": 
"minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɟ": { + "place": "palatal", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "k": { + "place": "velar", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "g": { + "place": "velar", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "q": { + "place": "uvular", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɢ": { + "place": "uvular", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʔ": { + "place": "glottal", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "m": { + "place": "bilabial", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɱ": { + "place": "labiodental", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "n": { + "place": "alveolar", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɳ": { + "place": "retroflex", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɲ": { + "place": "palatal", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ŋ": { + "place": "velar", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɴ": { + "place": "uvular", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "N": { + "place": "uvular", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʙ": { + "place": "bilabial", + "manner": "trill", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "B": { + "place": "bilabial", + "manner": "trill", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "r": { + "place": "alveolar", + "manner": "trill", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʀ": { + "place": "uvular", + "manner": "trill", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "R": { + "place": "uvular", + "manner": "trill", + "syllabic": 
"minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɾ": { + "place": "alveolar", + "manner": "tap", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɽ": { + "place": "retroflex", + "manner": "tap", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɸ": { + "place": "bilabial", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "β": { + "place": "bilabial", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "f": { + "place": "labiodental", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "v": { + "place": "labiodental", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "θ": { + "place": "dental", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ð": { + "place": "dental", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "s": { + "place": "alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "z": { + "place": "alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʃ": { + "place": "palato-alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʒ": { + "place": "palato-alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʂ": { + "place": "retroflex", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʐ": { + "place": "retroflex", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ç": { + "place": "palatal", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʝ": { + "place": "palatal", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "x": { + "place": "velar", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɣ": { + "place": "velar", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + 
"retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "χ": { + "place": "uvular", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʁ": { + "place": "uvular", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ħ": { + "place": "pharyngeal", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʕ": { + "place": "pharyngeal", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "h": { + "place": "glottal", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɦ": { + "place": "glottal", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɬ": { + "place": "alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "plus", + "aspirated": "minus", + }, + "ɮ": { + "place": "alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "plus", + "aspirated": "minus", + }, + "ʋ": { + "place": "labiodental", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɹ": { + "place": "alveolar", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɻ": { + "place": "retroflex", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "j": { + "place": "palatal", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɰ": { + "place": "velar", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "l": { + "place": "alveolar", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "plus", + "aspirated": "minus", + }, + "w": { + "place": "labiovelar", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + # Vowels + "i": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "front", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "y": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "front", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "e": { + "place": 
"vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "front", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "E": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "front", + "round": "minus", + "long": "plus", + "aspirated": "minus", + }, + "ø": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "front", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "ɛ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "front", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "œ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "front", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "æ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "low", + "back": "front", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "a": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "low", + "back": "front", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "A": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "low", + "back": "front", + "round": "minus", + "long": "plus", + "aspirated": "minus", + }, + "ɨ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "central", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "ʉ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "central", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "ə": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "central", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "u": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "back", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "U": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "back", + "round": "plus", + "long": "plus", + "aspirated": "minus", + }, + "o": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": 
"mid", + "back": "back", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "O": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "back", + "round": "plus", + "long": "plus", + "aspirated": "minus", + }, + "ɔ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "back", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "ɒ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "low", + "back": "back", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "I": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "front", + "round": "minus", + "long": "plus", + "aspirated": "minus", + }, +} + +# === Algorithm === + + +def align(str1, str2, epsilon=0): + """ + Compute the alignment of two phonetic strings. + + :param str str1: First string to be aligned + :param str str2: Second string to be aligned + + :type epsilon: float (0.0 to 1.0) + :param epsilon: Adjusts threshold similarity score for near-optimal alignments + + :rtype: list(list(tuple(str, str))) + :return: Alignment(s) of str1 and str2 + + (Kondrak 2002: 51) + """ + if np is None: + raise ImportError("You need numpy in order to use the align function") + + assert 0.0 <= epsilon <= 1.0, "Epsilon must be between 0.0 and 1.0." + m = len(str1) + n = len(str2) + # This includes Kondrak's initialization of row 0 and column 0 to all 0s. + S = np.zeros((m + 1, n + 1), dtype=float) + + # If i <= 1 or j <= 1, don't allow expansions as it doesn't make sense, + # and breaks array and string indices. Make sure they never get chosen + # by setting them to -inf. + for i in range(1, m + 1): + for j in range(1, n + 1): + edit1 = S[i - 1, j] + sigma_skip(str1[i - 1]) + edit2 = S[i, j - 1] + sigma_skip(str2[j - 1]) + edit3 = S[i - 1, j - 1] + sigma_sub(str1[i - 1], str2[j - 1]) + if i > 1: + edit4 = S[i - 2, j - 1] + sigma_exp(str2[j - 1], str1[i - 2 : i]) + else: + edit4 = -inf + if j > 1: + edit5 = S[i - 1, j - 2] + sigma_exp(str1[i - 1], str2[j - 2 : j]) + else: + edit5 = -inf + S[i, j] = max(edit1, edit2, edit3, edit4, edit5, 0) + + T = (1 - epsilon) * np.amax(S) # Threshold score for near-optimal alignments + + alignments = [] + for i in range(1, m + 1): + for j in range(1, n + 1): + if S[i, j] >= T: + alignments.append(_retrieve(i, j, 0, S, T, str1, str2, [])) + return alignments + + +def _retrieve(i, j, s, S, T, str1, str2, out): + """ + Retrieve the path through the similarity matrix S starting at (i, j). 
+ + :rtype: list(tuple(str, str)) + :return: Alignment of str1 and str2 + """ + if S[i, j] == 0: + return out + else: + if j > 1 and S[i - 1, j - 2] + sigma_exp(str1[i - 1], str2[j - 2 : j]) + s >= T: + out.insert(0, (str1[i - 1], str2[j - 2 : j])) + _retrieve( + i - 1, + j - 2, + s + sigma_exp(str1[i - 1], str2[j - 2 : j]), + S, + T, + str1, + str2, + out, + ) + elif ( + i > 1 and S[i - 2, j - 1] + sigma_exp(str2[j - 1], str1[i - 2 : i]) + s >= T + ): + out.insert(0, (str1[i - 2 : i], str2[j - 1])) + _retrieve( + i - 2, + j - 1, + s + sigma_exp(str2[j - 1], str1[i - 2 : i]), + S, + T, + str1, + str2, + out, + ) + elif S[i, j - 1] + sigma_skip(str2[j - 1]) + s >= T: + out.insert(0, ("-", str2[j - 1])) + _retrieve(i, j - 1, s + sigma_skip(str2[j - 1]), S, T, str1, str2, out) + elif S[i - 1, j] + sigma_skip(str1[i - 1]) + s >= T: + out.insert(0, (str1[i - 1], "-")) + _retrieve(i - 1, j, s + sigma_skip(str1[i - 1]), S, T, str1, str2, out) + elif S[i - 1, j - 1] + sigma_sub(str1[i - 1], str2[j - 1]) + s >= T: + out.insert(0, (str1[i - 1], str2[j - 1])) + _retrieve( + i - 1, + j - 1, + s + sigma_sub(str1[i - 1], str2[j - 1]), + S, + T, + str1, + str2, + out, + ) + return out + + +def sigma_skip(p): + """ + Returns score of an indel of P. + + (Kondrak 2002: 54) + """ + return C_skip + + +def sigma_sub(p, q): + """ + Returns score of a substitution of P with Q. + + (Kondrak 2002: 54) + """ + return C_sub - delta(p, q) - V(p) - V(q) + + +def sigma_exp(p, q): + """ + Returns score of an expansion/compression. + + (Kondrak 2002: 54) + """ + q1 = q[0] + q2 = q[1] + return C_exp - delta(p, q1) - delta(p, q2) - V(p) - max(V(q1), V(q2)) + + +def delta(p, q): + """ + Return weighted sum of difference between P and Q. + + (Kondrak 2002: 54) + """ + features = R(p, q) + total = 0 + for f in features: + total += diff(p, q, f) * salience[f] + return total + + +def diff(p, q, f): + """ + Returns difference between phonetic segments P and Q for feature F. + + (Kondrak 2002: 52, 54) + """ + p_features, q_features = feature_matrix[p], feature_matrix[q] + return abs(similarity_matrix[p_features[f]] - similarity_matrix[q_features[f]]) + + +def R(p, q): + """ + Return relevant features for segment comparison. + + (Kondrak 2002: 54) + """ + if p in consonants or q in consonants: + return R_c + return R_v + + +def V(p): + """ + Return vowel weight if P is vowel. + + (Kondrak 2002: 54) + """ + if p in consonants: + return 0 + return C_vwl + + +# === Test === + + +def demo(): + """ + A demonstration of the result of aligning phonetic sequences + used in Kondrak's (2002) dissertation. 
+ """ + data = [pair.split(",") for pair in cognate_data.split("\n")] + for pair in data: + alignment = align(pair[0], pair[1])[0] + alignment = [f"({a[0]}, {a[1]})" for a in alignment] + alignment = " ".join(alignment) + print(f"{pair[0]} ~ {pair[1]} : {alignment}") + + +cognate_data = """jo,ʒə +tu,ty +nosotros,nu +kjen,ki +ke,kwa +todos,tu +una,ən +dos,dø +tres,trwa +ombre,om +arbol,arbrə +pluma,plym +kabeθa,kap +boka,buʃ +pje,pje +koraθon,kœr +ber,vwar +benir,vənir +deθir,dir +pobre,povrə +ðis,dIzes +ðæt,das +wat,vas +nat,nixt +loŋ,laŋ +mæn,man +fleʃ,flajʃ +bləd,blyt +feðər,fEdər +hær,hAr +ir,Or +aj,awgə +nowz,nAzə +mawθ,munt +təŋ,tsuŋə +fut,fys +nij,knI +hænd,hant +hart,herts +livər,lEbər +ænd,ante +æt,ad +blow,flAre +ir,awris +ijt,edere +fiʃ,piʃkis +flow,fluere +staɾ,stella +ful,plenus +græs,gramen +hart,kordis +horn,korny +aj,ego +nij,genU +məðər,mAter +mawntən,mons +nejm,nomen +njuw,nowus +wən,unus +rawnd,rotundus +sow,suere +sit,sedere +θrij,tres +tuwθ,dentis +θin,tenwis +kinwawa,kenuaʔ +nina,nenah +napewa,napɛw +wapimini,wapemen +namesa,namɛʔs +okimawa,okemaw +ʃiʃipa,seʔsep +ahkohkwa,ahkɛh +pematesiweni,pematesewen +asenja,aʔsɛn""" + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/association.py b/venv/lib/python3.10/site-packages/nltk/metrics/association.py new file mode 100644 index 0000000000000000000000000000000000000000..b7010f1f4dd39c122a263aff5d243b3c19c52822 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/metrics/association.py @@ -0,0 +1,476 @@ +# Natural Language Toolkit: Ngram Association Measures +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Joel Nothman +# URL: +# For license information, see LICENSE.TXT + +""" +Provides scoring functions for a number of association measures through a +generic, abstract implementation in ``NgramAssocMeasures``, and n-specific +``BigramAssocMeasures`` and ``TrigramAssocMeasures``. +""" + +import math as _math +from abc import ABCMeta, abstractmethod +from functools import reduce + +_log2 = lambda x: _math.log2(x) +_ln = _math.log + +_product = lambda s: reduce(lambda x, y: x * y, s) + +_SMALL = 1e-20 + +try: + from scipy.stats import fisher_exact +except ImportError: + + def fisher_exact(*_args, **_kwargs): + raise NotImplementedError + + +### Indices to marginals arguments: + +NGRAM = 0 +"""Marginals index for the ngram count""" + +UNIGRAMS = -2 +"""Marginals index for a tuple of each unigram count""" + +TOTAL = -1 +"""Marginals index for the number of words in the data""" + + +class NgramAssocMeasures(metaclass=ABCMeta): + """ + An abstract class defining a collection of generic association measures. + Each public method returns a score, taking the following arguments:: + + score_fn(count_of_ngram, + (count_of_n-1gram_1, ..., count_of_n-1gram_j), + (count_of_n-2gram_1, ..., count_of_n-2gram_k), + ..., + (count_of_1gram_1, ..., count_of_1gram_n), + count_of_total_words) + + See ``BigramAssocMeasures`` and ``TrigramAssocMeasures`` + + Inheriting classes should define a property _n, and a method _contingency + which calculates contingency values from marginals in order for all + association measures defined here to be usable. 
+ """ + + _n = 0 + + @staticmethod + @abstractmethod + def _contingency(*marginals): + """Calculates values of a contingency table from marginal values.""" + raise NotImplementedError( + "The contingency table is not available" "in the general ngram case" + ) + + @staticmethod + @abstractmethod + def _marginals(*contingency): + """Calculates values of contingency table marginals from its values.""" + raise NotImplementedError( + "The contingency table is not available" "in the general ngram case" + ) + + @classmethod + def _expected_values(cls, cont): + """Calculates expected values for a contingency table.""" + n_all = sum(cont) + bits = [1 << i for i in range(cls._n)] + + # For each contingency table cell + for i in range(len(cont)): + # Yield the expected value + yield ( + _product( + sum(cont[x] for x in range(2**cls._n) if (x & j) == (i & j)) + for j in bits + ) + / (n_all ** (cls._n - 1)) + ) + + @staticmethod + def raw_freq(*marginals): + """Scores ngrams by their frequency""" + return marginals[NGRAM] / marginals[TOTAL] + + @classmethod + def student_t(cls, *marginals): + """Scores ngrams using Student's t test with independence hypothesis + for unigrams, as in Manning and Schutze 5.3.1. + """ + return ( + marginals[NGRAM] + - _product(marginals[UNIGRAMS]) / (marginals[TOTAL] ** (cls._n - 1)) + ) / (marginals[NGRAM] + _SMALL) ** 0.5 + + @classmethod + def chi_sq(cls, *marginals): + """Scores ngrams using Pearson's chi-square as in Manning and Schutze + 5.3.3. + """ + cont = cls._contingency(*marginals) + exps = cls._expected_values(cont) + return sum((obs - exp) ** 2 / (exp + _SMALL) for obs, exp in zip(cont, exps)) + + @staticmethod + def mi_like(*marginals, **kwargs): + """Scores ngrams using a variant of mutual information. The keyword + argument power sets an exponent (default 3) for the numerator. No + logarithm of the result is calculated. + """ + return marginals[NGRAM] ** kwargs.get("power", 3) / _product( + marginals[UNIGRAMS] + ) + + @classmethod + def pmi(cls, *marginals): + """Scores ngrams by pointwise mutual information, as in Manning and + Schutze 5.4. + """ + return _log2(marginals[NGRAM] * marginals[TOTAL] ** (cls._n - 1)) - _log2( + _product(marginals[UNIGRAMS]) + ) + + @classmethod + def likelihood_ratio(cls, *marginals): + """Scores ngrams using likelihood ratios as in Manning and Schutze 5.3.4.""" + cont = cls._contingency(*marginals) + return 2 * sum( + obs * _ln(obs / (exp + _SMALL) + _SMALL) + for obs, exp in zip(cont, cls._expected_values(cont)) + ) + + @classmethod + def poisson_stirling(cls, *marginals): + """Scores ngrams using the Poisson-Stirling measure.""" + exp = _product(marginals[UNIGRAMS]) / (marginals[TOTAL] ** (cls._n - 1)) + return marginals[NGRAM] * (_log2(marginals[NGRAM] / exp) - 1) + + @classmethod + def jaccard(cls, *marginals): + """Scores ngrams using the Jaccard index.""" + cont = cls._contingency(*marginals) + return cont[0] / sum(cont[:-1]) + + +class BigramAssocMeasures(NgramAssocMeasures): + """ + A collection of bigram association measures. Each association measure + is provided as a function with three arguments:: + + bigram_score_fn(n_ii, (n_ix, n_xi), n_xx) + + The arguments constitute the marginals of a contingency table, counting + the occurrences of particular events in a corpus. The letter i in the + suffix refers to the appearance of the word in question, while x indicates + the appearance of any word. Thus, for example: + + - n_ii counts ``(w1, w2)``, i.e. 
the bigram being scored + - n_ix counts ``(w1, *)`` + - n_xi counts ``(*, w2)`` + - n_xx counts ``(*, *)``, i.e. any bigram + + This may be shown with respect to a contingency table:: + + w1 ~w1 + ------ ------ + w2 | n_ii | n_oi | = n_xi + ------ ------ + ~w2 | n_io | n_oo | + ------ ------ + = n_ix TOTAL = n_xx + """ + + _n = 2 + + @staticmethod + def _contingency(n_ii, n_ix_xi_tuple, n_xx): + """Calculates values of a bigram contingency table from marginal values.""" + (n_ix, n_xi) = n_ix_xi_tuple + n_oi = n_xi - n_ii + n_io = n_ix - n_ii + return (n_ii, n_oi, n_io, n_xx - n_ii - n_oi - n_io) + + @staticmethod + def _marginals(n_ii, n_oi, n_io, n_oo): + """Calculates values of contingency table marginals from its values.""" + return (n_ii, (n_oi + n_ii, n_io + n_ii), n_oo + n_oi + n_io + n_ii) + + @staticmethod + def _expected_values(cont): + """Calculates expected values for a contingency table.""" + n_xx = sum(cont) + # For each contingency table cell + for i in range(4): + yield (cont[i] + cont[i ^ 1]) * (cont[i] + cont[i ^ 2]) / n_xx + + @classmethod + def phi_sq(cls, *marginals): + """Scores bigrams using phi-square, the square of the Pearson correlation + coefficient. + """ + n_ii, n_io, n_oi, n_oo = cls._contingency(*marginals) + + return (n_ii * n_oo - n_io * n_oi) ** 2 / ( + (n_ii + n_io) * (n_ii + n_oi) * (n_io + n_oo) * (n_oi + n_oo) + ) + + @classmethod + def chi_sq(cls, n_ii, n_ix_xi_tuple, n_xx): + """Scores bigrams using chi-square, i.e. phi-sq multiplied by the number + of bigrams, as in Manning and Schutze 5.3.3. + """ + (n_ix, n_xi) = n_ix_xi_tuple + return n_xx * cls.phi_sq(n_ii, (n_ix, n_xi), n_xx) + + @classmethod + def fisher(cls, *marginals): + """Scores bigrams using Fisher's Exact Test (Pedersen 1996). Less + sensitive to small counts than PMI or Chi Sq, but also more expensive + to compute. Requires scipy. + """ + + n_ii, n_io, n_oi, n_oo = cls._contingency(*marginals) + + (odds, pvalue) = fisher_exact([[n_ii, n_io], [n_oi, n_oo]], alternative="less") + return pvalue + + @staticmethod + def dice(n_ii, n_ix_xi_tuple, n_xx): + """Scores bigrams using Dice's coefficient.""" + (n_ix, n_xi) = n_ix_xi_tuple + return 2 * n_ii / (n_ix + n_xi) + + +class TrigramAssocMeasures(NgramAssocMeasures): + """ + A collection of trigram association measures. Each association measure + is provided as a function with four arguments:: + + trigram_score_fn(n_iii, + (n_iix, n_ixi, n_xii), + (n_ixx, n_xix, n_xxi), + n_xxx) + + The arguments constitute the marginals of a contingency table, counting + the occurrences of particular events in a corpus. The letter i in the + suffix refers to the appearance of the word in question, while x indicates + the appearance of any word. Thus, for example: + + - n_iii counts ``(w1, w2, w3)``, i.e. the trigram being scored + - n_ixx counts ``(w1, *, *)`` + - n_xxx counts ``(*, *, *)``, i.e. any trigram + """ + + _n = 3 + + @staticmethod + def _contingency(n_iii, n_iix_tuple, n_ixx_tuple, n_xxx): + """Calculates values of a trigram contingency table (or cube) from + marginal values. 
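+
+    For instance, in the example below
+    n_oio = n_xix - n_iii - n_oii - n_iio = 73 - 1 - 0 - 0 = 72,
+    and the remaining mass, 1927, falls into n_ooo.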
+ >>> TrigramAssocMeasures._contingency(1, (1, 1, 1), (1, 73, 1), 2000) + (1, 0, 0, 0, 0, 72, 0, 1927) + """ + (n_iix, n_ixi, n_xii) = n_iix_tuple + (n_ixx, n_xix, n_xxi) = n_ixx_tuple + n_oii = n_xii - n_iii + n_ioi = n_ixi - n_iii + n_iio = n_iix - n_iii + n_ooi = n_xxi - n_iii - n_oii - n_ioi + n_oio = n_xix - n_iii - n_oii - n_iio + n_ioo = n_ixx - n_iii - n_ioi - n_iio + n_ooo = n_xxx - n_iii - n_oii - n_ioi - n_iio - n_ooi - n_oio - n_ioo + + return (n_iii, n_oii, n_ioi, n_ooi, n_iio, n_oio, n_ioo, n_ooo) + + @staticmethod + def _marginals(*contingency): + """Calculates values of contingency table marginals from its values. + >>> TrigramAssocMeasures._marginals(1, 0, 0, 0, 0, 72, 0, 1927) + (1, (1, 1, 1), (1, 73, 1), 2000) + """ + n_iii, n_oii, n_ioi, n_ooi, n_iio, n_oio, n_ioo, n_ooo = contingency + return ( + n_iii, + (n_iii + n_iio, n_iii + n_ioi, n_iii + n_oii), + ( + n_iii + n_ioi + n_iio + n_ioo, + n_iii + n_oii + n_iio + n_oio, + n_iii + n_oii + n_ioi + n_ooi, + ), + sum(contingency), + ) + + +class QuadgramAssocMeasures(NgramAssocMeasures): + """ + A collection of quadgram association measures. Each association measure + is provided as a function with five arguments:: + + trigram_score_fn(n_iiii, + (n_iiix, n_iixi, n_ixii, n_xiii), + (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix), + (n_ixxx, n_xixx, n_xxix, n_xxxi), + n_all) + + The arguments constitute the marginals of a contingency table, counting + the occurrences of particular events in a corpus. The letter i in the + suffix refers to the appearance of the word in question, while x indicates + the appearance of any word. Thus, for example: + + - n_iiii counts ``(w1, w2, w3, w4)``, i.e. the quadgram being scored + - n_ixxi counts ``(w1, *, *, w4)`` + - n_xxxx counts ``(*, *, *, *)``, i.e. any quadgram + """ + + _n = 4 + + @staticmethod + def _contingency(n_iiii, n_iiix_tuple, n_iixx_tuple, n_ixxx_tuple, n_xxxx): + """Calculates values of a quadgram contingency table from + marginal values. + """ + (n_iiix, n_iixi, n_ixii, n_xiii) = n_iiix_tuple + (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix) = n_iixx_tuple + (n_ixxx, n_xixx, n_xxix, n_xxxi) = n_ixxx_tuple + n_oiii = n_xiii - n_iiii + n_ioii = n_ixii - n_iiii + n_iioi = n_iixi - n_iiii + n_ooii = n_xxii - n_iiii - n_oiii - n_ioii + n_oioi = n_xixi - n_iiii - n_oiii - n_iioi + n_iooi = n_ixxi - n_iiii - n_ioii - n_iioi + n_oooi = n_xxxi - n_iiii - n_oiii - n_ioii - n_iioi - n_ooii - n_iooi - n_oioi + n_iiio = n_iiix - n_iiii + n_oiio = n_xiix - n_iiii - n_oiii - n_iiio + n_ioio = n_ixix - n_iiii - n_ioii - n_iiio + n_ooio = n_xxix - n_iiii - n_oiii - n_ioii - n_iiio - n_ooii - n_ioio - n_oiio + n_iioo = n_iixx - n_iiii - n_iioi - n_iiio + n_oioo = n_xixx - n_iiii - n_oiii - n_iioi - n_iiio - n_oioi - n_oiio - n_iioo + n_iooo = n_ixxx - n_iiii - n_ioii - n_iioi - n_iiio - n_iooi - n_iioo - n_ioio + n_oooo = ( + n_xxxx + - n_iiii + - n_oiii + - n_ioii + - n_iioi + - n_ooii + - n_oioi + - n_iooi + - n_oooi + - n_iiio + - n_oiio + - n_ioio + - n_ooio + - n_iioo + - n_oioo + - n_iooo + ) + + return ( + n_iiii, + n_oiii, + n_ioii, + n_ooii, + n_iioi, + n_oioi, + n_iooi, + n_oooi, + n_iiio, + n_oiio, + n_ioio, + n_ooio, + n_iioo, + n_oioo, + n_iooo, + n_oooo, + ) + + @staticmethod + def _marginals(*contingency): + """Calculates values of contingency table marginals from its values. 
+ QuadgramAssocMeasures._marginals(1, 0, 2, 46, 552, 825, 2577, 34967, 1, 0, 2, 48, 7250, 9031, 28585, 356653) + (1, (2, 553, 3, 1), (7804, 6, 3132, 1378, 49, 2), (38970, 17660, 100, 38970), 440540) + """ + ( + n_iiii, + n_oiii, + n_ioii, + n_ooii, + n_iioi, + n_oioi, + n_iooi, + n_oooi, + n_iiio, + n_oiio, + n_ioio, + n_ooio, + n_iioo, + n_oioo, + n_iooo, + n_oooo, + ) = contingency + + n_iiix = n_iiii + n_iiio + n_iixi = n_iiii + n_iioi + n_ixii = n_iiii + n_ioii + n_xiii = n_iiii + n_oiii + + n_iixx = n_iiii + n_iioi + n_iiio + n_iioo + n_ixix = n_iiii + n_ioii + n_iiio + n_ioio + n_ixxi = n_iiii + n_ioii + n_iioi + n_iooi + n_xixi = n_iiii + n_oiii + n_iioi + n_oioi + n_xxii = n_iiii + n_oiii + n_ioii + n_ooii + n_xiix = n_iiii + n_oiii + n_iiio + n_oiio + + n_ixxx = n_iiii + n_ioii + n_iioi + n_iiio + n_iooi + n_iioo + n_ioio + n_iooo + n_xixx = n_iiii + n_oiii + n_iioi + n_iiio + n_oioi + n_oiio + n_iioo + n_oioo + n_xxix = n_iiii + n_oiii + n_ioii + n_iiio + n_ooii + n_ioio + n_oiio + n_ooio + n_xxxi = n_iiii + n_oiii + n_ioii + n_iioi + n_ooii + n_iooi + n_oioi + n_oooi + + n_all = sum(contingency) + + return ( + n_iiii, + (n_iiix, n_iixi, n_ixii, n_xiii), + (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix), + (n_ixxx, n_xixx, n_xxix, n_xxxi), + n_all, + ) + + +class ContingencyMeasures: + """Wraps NgramAssocMeasures classes such that the arguments of association + measures are contingency table values rather than marginals. + """ + + def __init__(self, measures): + """Constructs a ContingencyMeasures given a NgramAssocMeasures class""" + self.__class__.__name__ = "Contingency" + measures.__class__.__name__ + for k in dir(measures): + if k.startswith("__"): + continue + v = getattr(measures, k) + if not k.startswith("_"): + v = self._make_contingency_fn(measures, v) + setattr(self, k, v) + + @staticmethod + def _make_contingency_fn(measures, old_fn): + """From an association measure function, produces a new function which + accepts contingency table values as its arguments. + """ + + def res(*contingency): + return old_fn(*measures._marginals(*contingency)) + + res.__doc__ = old_fn.__doc__ + res.__name__ = old_fn.__name__ + return res diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/confusionmatrix.py b/venv/lib/python3.10/site-packages/nltk/metrics/confusionmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..3cb6ee9b2a7e1a9b2235d9268d20fc1269908fe1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/metrics/confusionmatrix.py @@ -0,0 +1,353 @@ +# Natural Language Toolkit: Confusion Matrices +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + +from nltk.probability import FreqDist + + +class ConfusionMatrix: + """ + The confusion matrix between a list of reference values and a + corresponding list of test values. Entry *[r,t]* of this + matrix is a count of the number of times that the reference value + *r* corresponds to the test value *t*. E.g.: + + >>> from nltk.metrics import ConfusionMatrix + >>> ref = 'DET NN VB DET JJ NN NN IN DET NN'.split() + >>> test = 'DET VB VB DET NN NN NN IN DET NN'.split() + >>> cm = ConfusionMatrix(ref, test) + >>> print(cm['NN', 'NN']) + 3 + + Note that the diagonal entries *Ri=Tj* of this matrix + corresponds to correct values; and the off-diagonal entries + correspond to incorrect values. 
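+
+    As a rough further illustration (same ``ref``/``test`` pair as above),
+    ``cm.recall('NN')`` and ``cm.precision('NN')`` both evaluate to 0.75, and
+    ``cm.evaluate()`` tabulates these per-value scores.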
+ """ + + def __init__(self, reference, test, sort_by_count=False): + """ + Construct a new confusion matrix from a list of reference + values and a corresponding list of test values. + + :type reference: list + :param reference: An ordered list of reference values. + :type test: list + :param test: A list of values to compare against the + corresponding reference values. + :raise ValueError: If ``reference`` and ``length`` do not have + the same length. + """ + if len(reference) != len(test): + raise ValueError("Lists must have the same length.") + + # Get a list of all values. + if sort_by_count: + ref_fdist = FreqDist(reference) + test_fdist = FreqDist(test) + + def key(v): + return -(ref_fdist[v] + test_fdist[v]) + + values = sorted(set(reference + test), key=key) + else: + values = sorted(set(reference + test)) + + # Construct a value->index dictionary + indices = {val: i for (i, val) in enumerate(values)} + + # Make a confusion matrix table. + confusion = [[0 for _ in values] for _ in values] + max_conf = 0 # Maximum confusion + for w, g in zip(reference, test): + confusion[indices[w]][indices[g]] += 1 + max_conf = max(max_conf, confusion[indices[w]][indices[g]]) + + #: A list of all values in ``reference`` or ``test``. + self._values = values + #: A dictionary mapping values in ``self._values`` to their indices. + self._indices = indices + #: The confusion matrix itself (as a list of lists of counts). + self._confusion = confusion + #: The greatest count in ``self._confusion`` (used for printing). + self._max_conf = max_conf + #: The total number of values in the confusion matrix. + self._total = len(reference) + #: The number of correct (on-diagonal) values in the matrix. + self._correct = sum(confusion[i][i] for i in range(len(values))) + + def __getitem__(self, li_lj_tuple): + """ + :return: The number of times that value ``li`` was expected and + value ``lj`` was given. + :rtype: int + """ + (li, lj) = li_lj_tuple + i = self._indices[li] + j = self._indices[lj] + return self._confusion[i][j] + + def __repr__(self): + return f"" + + def __str__(self): + return self.pretty_format() + + def pretty_format( + self, + show_percents=False, + values_in_chart=True, + truncate=None, + sort_by_count=False, + ): + """ + :return: A multi-line string representation of this confusion matrix. + :type truncate: int + :param truncate: If specified, then only show the specified + number of values. Any sorting (e.g., sort_by_count) + will be performed before truncation. + :param sort_by_count: If true, then sort by the count of each + label in the reference data. I.e., labels that occur more + frequently in the reference label will be towards the left + edge of the matrix, and labels that occur less frequently + will be towards the right edge. + + @todo: add marginals? + """ + confusion = self._confusion + + values = self._values + if sort_by_count: + values = sorted( + values, key=lambda v: -sum(self._confusion[self._indices[v]]) + ) + + if truncate: + values = values[:truncate] + + if values_in_chart: + value_strings = ["%s" % val for val in values] + else: + value_strings = [str(n + 1) for n in range(len(values))] + + # Construct a format string for row values + valuelen = max(len(val) for val in value_strings) + value_format = "%" + repr(valuelen) + "s | " + # Construct a format string for matrix entries + if show_percents: + entrylen = 6 + entry_format = "%5.1f%%" + zerostr = " ." 
+ else: + entrylen = len(repr(self._max_conf)) + entry_format = "%" + repr(entrylen) + "d" + zerostr = " " * (entrylen - 1) + "." + + # Write the column values. + s = "" + for i in range(valuelen): + s += (" " * valuelen) + " |" + for val in value_strings: + if i >= valuelen - len(val): + s += val[i - valuelen + len(val)].rjust(entrylen + 1) + else: + s += " " * (entrylen + 1) + s += " |\n" + + # Write a dividing line + s += "{}-+-{}+\n".format("-" * valuelen, "-" * ((entrylen + 1) * len(values))) + + # Write the entries. + for val, li in zip(value_strings, values): + i = self._indices[li] + s += value_format % val + for lj in values: + j = self._indices[lj] + if confusion[i][j] == 0: + s += zerostr + elif show_percents: + s += entry_format % (100.0 * confusion[i][j] / self._total) + else: + s += entry_format % confusion[i][j] + if i == j: + prevspace = s.rfind(" ") + s = s[:prevspace] + "<" + s[prevspace + 1 :] + ">" + else: + s += " " + s += "|\n" + + # Write a dividing line + s += "{}-+-{}+\n".format("-" * valuelen, "-" * ((entrylen + 1) * len(values))) + + # Write a key + s += "(row = reference; col = test)\n" + if not values_in_chart: + s += "Value key:\n" + for i, value in enumerate(values): + s += "%6d: %s\n" % (i + 1, value) + + return s + + def key(self): + values = self._values + str = "Value key:\n" + indexlen = len(repr(len(values) - 1)) + key_format = " %" + repr(indexlen) + "d: %s\n" + for i in range(len(values)): + str += key_format % (i, values[i]) + + return str + + def recall(self, value): + """Given a value in the confusion matrix, return the recall + that corresponds to this value. The recall is defined as: + + - *r* = true positive / (true positive + false positive) + + and can loosely be considered the ratio of how often ``value`` + was predicted correctly relative to how often ``value`` was + the true result. + + :param value: value used in the ConfusionMatrix + :return: the recall corresponding to ``value``. + :rtype: float + """ + # Number of times `value` was correct, and also predicted + TP = self[value, value] + # Number of times `value` was correct + TP_FN = sum(self[value, pred_value] for pred_value in self._values) + if TP_FN == 0: + return 0.0 + return TP / TP_FN + + def precision(self, value): + """Given a value in the confusion matrix, return the precision + that corresponds to this value. The precision is defined as: + + - *p* = true positive / (true positive + false negative) + + and can loosely be considered the ratio of how often ``value`` + was predicted correctly relative to the number of predictions + for ``value``. + + :param value: value used in the ConfusionMatrix + :return: the precision corresponding to ``value``. + :rtype: float + """ + # Number of times `value` was correct, and also predicted + TP = self[value, value] + # Number of times `value` was predicted + TP_FP = sum(self[real_value, value] for real_value in self._values) + if TP_FP == 0: + return 0.0 + return TP / TP_FP + + def f_measure(self, value, alpha=0.5): + """ + Given a value used in the confusion matrix, return the f-measure + that corresponds to this value. The f-measure is the harmonic mean + of the ``precision`` and ``recall``, weighted by ``alpha``. 
+ In particular, given the precision *p* and recall *r* defined by: + + - *p* = true positive / (true positive + false negative) + - *r* = true positive / (true positive + false positive) + + The f-measure is: + + - *1/(alpha/p + (1-alpha)/r)* + + With ``alpha = 0.5``, this reduces to: + + - *2pr / (p + r)* + + :param value: value used in the ConfusionMatrix + :param alpha: Ratio of the cost of false negative compared to false + positives. Defaults to 0.5, where the costs are equal. + :type alpha: float + :return: the F-measure corresponding to ``value``. + :rtype: float + """ + p = self.precision(value) + r = self.recall(value) + if p == 0.0 or r == 0.0: + return 0.0 + return 1.0 / (alpha / p + (1 - alpha) / r) + + def evaluate(self, alpha=0.5, truncate=None, sort_by_count=False): + """ + Tabulate the **recall**, **precision** and **f-measure** + for each value in this confusion matrix. + + >>> reference = "DET NN VB DET JJ NN NN IN DET NN".split() + >>> test = "DET VB VB DET NN NN NN IN DET NN".split() + >>> cm = ConfusionMatrix(reference, test) + >>> print(cm.evaluate()) + Tag | Prec. | Recall | F-measure + ----+--------+--------+----------- + DET | 1.0000 | 1.0000 | 1.0000 + IN | 1.0000 | 1.0000 | 1.0000 + JJ | 0.0000 | 0.0000 | 0.0000 + NN | 0.7500 | 0.7500 | 0.7500 + VB | 0.5000 | 1.0000 | 0.6667 + + + :param alpha: Ratio of the cost of false negative compared to false + positives, as used in the f-measure computation. Defaults to 0.5, + where the costs are equal. + :type alpha: float + :param truncate: If specified, then only show the specified + number of values. Any sorting (e.g., sort_by_count) + will be performed before truncation. Defaults to None + :type truncate: int, optional + :param sort_by_count: Whether to sort the outputs on frequency + in the reference label. Defaults to False. + :type sort_by_count: bool, optional + :return: A tabulated recall, precision and f-measure string + :rtype: str + """ + tags = self._values + + # Apply keyword parameters + if sort_by_count: + tags = sorted(tags, key=lambda v: -sum(self._confusion[self._indices[v]])) + if truncate: + tags = tags[:truncate] + + tag_column_len = max(max(len(tag) for tag in tags), 3) + + # Construct the header + s = ( + f"{' ' * (tag_column_len - 3)}Tag | Prec. 
| Recall | F-measure\n" + f"{'-' * tag_column_len}-+--------+--------+-----------\n" + ) + + # Construct the body + for tag in tags: + s += ( + f"{tag:>{tag_column_len}} | " + f"{self.precision(tag):<6.4f} | " + f"{self.recall(tag):<6.4f} | " + f"{self.f_measure(tag, alpha=alpha):.4f}\n" + ) + + return s + + +def demo(): + reference = "DET NN VB DET JJ NN NN IN DET NN".split() + test = "DET VB VB DET NN NN NN IN DET NN".split() + print("Reference =", reference) + print("Test =", test) + print("Confusion matrix:") + print(ConfusionMatrix(reference, test)) + print(ConfusionMatrix(reference, test).pretty_format(sort_by_count=True)) + + print(ConfusionMatrix(reference, test).recall("VB")) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/distance.py b/venv/lib/python3.10/site-packages/nltk/metrics/distance.py new file mode 100644 index 0000000000000000000000000000000000000000..1f115d97abd6678f7b1a3b15b2e68671d70e5ea7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/metrics/distance.py @@ -0,0 +1,508 @@ +# Natural Language Toolkit: Distance Metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Tom Lippincott +# URL: +# For license information, see LICENSE.TXT +# + +""" +Distance Metrics. + +Compute the distance between two items (usually strings). +As metrics, they must satisfy the following three requirements: + +1. d(a, a) = 0 +2. d(a, b) >= 0 +3. d(a, c) <= d(a, b) + d(b, c) +""" + +import operator +import warnings + + +def _edit_dist_init(len1, len2): + lev = [] + for i in range(len1): + lev.append([0] * len2) # initialize 2D array to zero + for i in range(len1): + lev[i][0] = i # column 0: 0,1,2,3,4,... + for j in range(len2): + lev[0][j] = j # row 0: 0,1,2,3,4,... + return lev + + +def _last_left_t_init(sigma): + return {c: 0 for c in sigma} + + +def _edit_dist_step( + lev, i, j, s1, s2, last_left, last_right, substitution_cost=1, transpositions=False +): + c1 = s1[i - 1] + c2 = s2[j - 1] + + # skipping a character in s1 + a = lev[i - 1][j] + 1 + # skipping a character in s2 + b = lev[i][j - 1] + 1 + # substitution + c = lev[i - 1][j - 1] + (substitution_cost if c1 != c2 else 0) + + # transposition + d = c + 1 # never picked by default + if transpositions and last_left > 0 and last_right > 0: + d = lev[last_left - 1][last_right - 1] + i - last_left + j - last_right - 1 + + # pick the cheapest + lev[i][j] = min(a, b, c, d) + + +def edit_distance(s1, s2, substitution_cost=1, transpositions=False): + """ + Calculate the Levenshtein edit-distance between two strings. + The edit distance is the number of characters that need to be + substituted, inserted, or deleted, to transform s1 into s2. For + example, transforming "rain" to "shine" requires three steps, + consisting of two substitutions and one insertion: + "rain" -> "sain" -> "shin" -> "shine". These operations could have + been done in other orders, but at least three steps are needed. + + Allows specifying the cost of substitution edits (e.g., "a" -> "b"), + because sometimes it makes sense to assign greater penalties to + substitutions. + + This also optionally allows transposition edits (e.g., "ab" -> "ba"), + though this is disabled by default. 
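+
+    For example (illustrative calls, not doctests)::
+
+        edit_distance("rain", "shine")                  # 3
+        edit_distance("ab", "ba")                       # 2 (two substitutions)
+        edit_distance("ab", "ba", transpositions=True)  # 1 (one transposition)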
+ + :param s1, s2: The strings to be analysed + :param transpositions: Whether to allow transposition edits + :type s1: str + :type s2: str + :type substitution_cost: int + :type transpositions: bool + :rtype: int + """ + # set up a 2-D array + len1 = len(s1) + len2 = len(s2) + lev = _edit_dist_init(len1 + 1, len2 + 1) + + # retrieve alphabet + sigma = set() + sigma.update(s1) + sigma.update(s2) + + # set up table to remember positions of last seen occurrence in s1 + last_left_t = _last_left_t_init(sigma) + + # iterate over the array + # i and j start from 1 and not 0 to stay close to the wikipedia pseudo-code + # see https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance + for i in range(1, len1 + 1): + last_right_buf = 0 + for j in range(1, len2 + 1): + last_left = last_left_t[s2[j - 1]] + last_right = last_right_buf + if s1[i - 1] == s2[j - 1]: + last_right_buf = j + _edit_dist_step( + lev, + i, + j, + s1, + s2, + last_left, + last_right, + substitution_cost=substitution_cost, + transpositions=transpositions, + ) + last_left_t[s1[i - 1]] = i + return lev[len1][len2] + + +def _edit_dist_backtrace(lev): + i, j = len(lev) - 1, len(lev[0]) - 1 + alignment = [(i, j)] + + while (i, j) != (0, 0): + directions = [ + (i - 1, j - 1), # substitution + (i - 1, j), # skip s1 + (i, j - 1), # skip s2 + ] + + direction_costs = ( + (lev[i][j] if (i >= 0 and j >= 0) else float("inf"), (i, j)) + for i, j in directions + ) + _, (i, j) = min(direction_costs, key=operator.itemgetter(0)) + + alignment.append((i, j)) + return list(reversed(alignment)) + + +def edit_distance_align(s1, s2, substitution_cost=1): + """ + Calculate the minimum Levenshtein edit-distance based alignment + mapping between two strings. The alignment finds the mapping + from string s1 to s2 that minimizes the edit distance cost. + For example, mapping "rain" to "shine" would involve 2 + substitutions, 2 matches and an insertion resulting in + the following mapping: + [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (4, 5)] + NB: (0, 0) is the start state without any letters associated + See more: https://web.stanford.edu/class/cs124/lec/med.pdf + + In case of multiple valid minimum-distance alignments, the + backtrace has the following operation precedence: + + 1. Substitute s1 and s2 characters + 2. Skip s1 character + 3. Skip s2 character + + The backtrace is carried out in reverse string order. + + This function does not support transposition. + + :param s1, s2: The strings to be aligned + :type s1: str + :type s2: str + :type substitution_cost: int + :rtype: List[Tuple(int, int)] + """ + # set up a 2-D array + len1 = len(s1) + len2 = len(s2) + lev = _edit_dist_init(len1 + 1, len2 + 1) + + # iterate over the array + for i in range(len1): + for j in range(len2): + _edit_dist_step( + lev, + i + 1, + j + 1, + s1, + s2, + 0, + 0, + substitution_cost=substitution_cost, + transpositions=False, + ) + + # backtrace to find alignment + alignment = _edit_dist_backtrace(lev) + return alignment + + +def binary_distance(label1, label2): + """Simple equality test. + + 0.0 if the labels are identical, 1.0 if they are different. 
+ + >>> from nltk.metrics import binary_distance + >>> binary_distance(1,1) + 0.0 + + >>> binary_distance(1,3) + 1.0 + """ + + return 0.0 if label1 == label2 else 1.0 + + +def jaccard_distance(label1, label2): + """Distance metric comparing set-similarity.""" + return (len(label1.union(label2)) - len(label1.intersection(label2))) / len( + label1.union(label2) + ) + + +def masi_distance(label1, label2): + """Distance metric that takes into account partial agreement when multiple + labels are assigned. + + >>> from nltk.metrics import masi_distance + >>> masi_distance(set([1, 2]), set([1, 2, 3, 4])) + 0.665 + + Passonneau 2006, Measuring Agreement on Set-Valued Items (MASI) + for Semantic and Pragmatic Annotation. + """ + + len_intersection = len(label1.intersection(label2)) + len_union = len(label1.union(label2)) + len_label1 = len(label1) + len_label2 = len(label2) + if len_label1 == len_label2 and len_label1 == len_intersection: + m = 1 + elif len_intersection == min(len_label1, len_label2): + m = 0.67 + elif len_intersection > 0: + m = 0.33 + else: + m = 0 + + return 1 - len_intersection / len_union * m + + +def interval_distance(label1, label2): + """Krippendorff's interval distance metric + + >>> from nltk.metrics import interval_distance + >>> interval_distance(1,10) + 81 + + Krippendorff 1980, Content Analysis: An Introduction to its Methodology + """ + + try: + return pow(label1 - label2, 2) + # return pow(list(label1)[0]-list(label2)[0],2) + except: + print("non-numeric labels not supported with interval distance") + + +def presence(label): + """Higher-order function to test presence of a given label""" + + return lambda x, y: 1.0 * ((label in x) == (label in y)) + + +def fractional_presence(label): + return ( + lambda x, y: abs((1.0 / len(x)) - (1.0 / len(y))) * (label in x and label in y) + or 0.0 * (label not in x and label not in y) + or abs(1.0 / len(x)) * (label in x and label not in y) + or (1.0 / len(y)) * (label not in x and label in y) + ) + + +def custom_distance(file): + data = {} + with open(file) as infile: + for l in infile: + labelA, labelB, dist = l.strip().split("\t") + labelA = frozenset([labelA]) + labelB = frozenset([labelB]) + data[frozenset([labelA, labelB])] = float(dist) + return lambda x, y: data[frozenset([x, y])] + + +def jaro_similarity(s1, s2): + """ + Computes the Jaro similarity between 2 sequences from: + + Matthew A. Jaro (1989). Advances in record linkage methodology + as applied to the 1985 census of Tampa Florida. Journal of the + American Statistical Association. 84 (406): 414-20. + + The Jaro distance between is the min no. of single-character transpositions + required to change one word into another. The Jaro similarity formula from + https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance : + + ``jaro_sim = 0 if m = 0 else 1/3 * (m/|s_1| + m/s_2 + (m-t)/m)`` + + where + - `|s_i|` is the length of string `s_i` + - `m` is the no. of matching characters + - `t` is the half no. of possible transpositions. + """ + # First, store the length of the strings + # because they will be re-used several times. + len_s1, len_s2 = len(s1), len(s2) + + # The upper bound of the distance for being a matched character. + match_bound = max(len_s1, len_s2) // 2 - 1 + + # Initialize the counts for matches and transpositions. + matches = 0 # no.of matched characters in s1 and s2 + transpositions = 0 # no. 
of transpositions between s1 and s2 + flagged_1 = [] # positions in s1 which are matches to some character in s2 + flagged_2 = [] # positions in s2 which are matches to some character in s1 + + # Iterate through sequences, check for matches and compute transpositions. + for i in range(len_s1): # Iterate through each character. + upperbound = min(i + match_bound, len_s2 - 1) + lowerbound = max(0, i - match_bound) + for j in range(lowerbound, upperbound + 1): + if s1[i] == s2[j] and j not in flagged_2: + matches += 1 + flagged_1.append(i) + flagged_2.append(j) + break + flagged_2.sort() + for i, j in zip(flagged_1, flagged_2): + if s1[i] != s2[j]: + transpositions += 1 + + if matches == 0: + return 0 + else: + return ( + 1 + / 3 + * ( + matches / len_s1 + + matches / len_s2 + + (matches - transpositions // 2) / matches + ) + ) + + +def jaro_winkler_similarity(s1, s2, p=0.1, max_l=4): + """ + The Jaro Winkler distance is an extension of the Jaro similarity in: + + William E. Winkler. 1990. String Comparator Metrics and Enhanced + Decision Rules in the Fellegi-Sunter Model of Record Linkage. + Proceedings of the Section on Survey Research Methods. + American Statistical Association: 354-359. + + such that: + + jaro_winkler_sim = jaro_sim + ( l * p * (1 - jaro_sim) ) + + where, + + - jaro_sim is the output from the Jaro Similarity, + see jaro_similarity() + - l is the length of common prefix at the start of the string + - this implementation provides an upperbound for the l value + to keep the prefixes.A common value of this upperbound is 4. + - p is the constant scaling factor to overweigh common prefixes. + The Jaro-Winkler similarity will fall within the [0, 1] bound, + given that max(p)<=0.25 , default is p=0.1 in Winkler (1990) + + + Test using outputs from https://www.census.gov/srd/papers/pdf/rr93-8.pdf + from "Table 5 Comparison of String Comparators Rescaled between 0 and 1" + + >>> winkler_examples = [("billy", "billy"), ("billy", "bill"), ("billy", "blily"), + ... ("massie", "massey"), ("yvette", "yevett"), ("billy", "bolly"), ("dwayne", "duane"), + ... ("dixon", "dickson"), ("billy", "susan")] + + >>> winkler_scores = [1.000, 0.967, 0.947, 0.944, 0.911, 0.893, 0.858, 0.853, 0.000] + >>> jaro_scores = [1.000, 0.933, 0.933, 0.889, 0.889, 0.867, 0.822, 0.790, 0.000] + + One way to match the values on the Winkler's paper is to provide a different + p scaling factor for different pairs of strings, e.g. + + >>> p_factors = [0.1, 0.125, 0.20, 0.125, 0.20, 0.20, 0.20, 0.15, 0.1] + + >>> for (s1, s2), jscore, wscore, p in zip(winkler_examples, jaro_scores, winkler_scores, p_factors): + ... assert round(jaro_similarity(s1, s2), 3) == jscore + ... assert round(jaro_winkler_similarity(s1, s2, p=p), 3) == wscore + + + Test using outputs from https://www.census.gov/srd/papers/pdf/rr94-5.pdf from + "Table 2.1. Comparison of String Comparators Using Last Names, First Names, and Street Names" + + >>> winkler_examples = [('SHACKLEFORD', 'SHACKELFORD'), ('DUNNINGHAM', 'CUNNIGHAM'), + ... ('NICHLESON', 'NICHULSON'), ('JONES', 'JOHNSON'), ('MASSEY', 'MASSIE'), + ... ('ABROMS', 'ABRAMS'), ('HARDIN', 'MARTINEZ'), ('ITMAN', 'SMITH'), + ... ('JERALDINE', 'GERALDINE'), ('MARHTA', 'MARTHA'), ('MICHELLE', 'MICHAEL'), + ... ('JULIES', 'JULIUS'), ('TANYA', 'TONYA'), ('DWAYNE', 'DUANE'), ('SEAN', 'SUSAN'), + ... ('JON', 'JOHN'), ('JON', 'JAN'), ('BROOKHAVEN', 'BRROKHAVEN'), + ... ('BROOK HALLOW', 'BROOK HLLW'), ('DECATUR', 'DECATIR'), ('FITZRUREITER', 'FITZENREITER'), + ... 
('HIGBEE', 'HIGHEE'), ('HIGBEE', 'HIGVEE'), ('LACURA', 'LOCURA'), ('IOWA', 'IONA'), ('1ST', 'IST')] + + >>> jaro_scores = [0.970, 0.896, 0.926, 0.790, 0.889, 0.889, 0.722, 0.467, 0.926, + ... 0.944, 0.869, 0.889, 0.867, 0.822, 0.783, 0.917, 0.000, 0.933, 0.944, 0.905, + ... 0.856, 0.889, 0.889, 0.889, 0.833, 0.000] + + >>> winkler_scores = [0.982, 0.896, 0.956, 0.832, 0.944, 0.922, 0.722, 0.467, 0.926, + ... 0.961, 0.921, 0.933, 0.880, 0.858, 0.805, 0.933, 0.000, 0.947, 0.967, 0.943, + ... 0.913, 0.922, 0.922, 0.900, 0.867, 0.000] + + One way to match the values on the Winkler's paper is to provide a different + p scaling factor for different pairs of strings, e.g. + + >>> p_factors = [0.1, 0.1, 0.1, 0.1, 0.125, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.20, + ... 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1] + + + >>> for (s1, s2), jscore, wscore, p in zip(winkler_examples, jaro_scores, winkler_scores, p_factors): + ... if (s1, s2) in [('JON', 'JAN'), ('1ST', 'IST')]: + ... continue # Skip bad examples from the paper. + ... assert round(jaro_similarity(s1, s2), 3) == jscore + ... assert round(jaro_winkler_similarity(s1, s2, p=p), 3) == wscore + + + + This test-case proves that the output of Jaro-Winkler similarity depends on + the product l * p and not on the product max_l * p. Here the product max_l * p > 1 + however the product l * p <= 1 + + >>> round(jaro_winkler_similarity('TANYA', 'TONYA', p=0.1, max_l=100), 3) + 0.88 + """ + # To ensure that the output of the Jaro-Winkler's similarity + # falls between [0,1], the product of l * p needs to be + # also fall between [0,1]. + if not 0 <= max_l * p <= 1: + warnings.warn( + str( + "The product `max_l * p` might not fall between [0,1]." + "Jaro-Winkler similarity might not be between 0 and 1." + ) + ) + + # Compute the Jaro similarity + jaro_sim = jaro_similarity(s1, s2) + + # Initialize the upper bound for the no. of prefixes. + # if user did not pre-define the upperbound, + # use shorter length between s1 and s2 + + # Compute the prefix matches. + l = 0 + # zip() will automatically loop until the end of shorter string. + for s1_i, s2_i in zip(s1, s2): + if s1_i == s2_i: + l += 1 + else: + break + if l == max_l: + break + # Return the similarity value as described in docstring. 
+ return jaro_sim + (l * p * (1 - jaro_sim)) + + +def demo(): + string_distance_examples = [ + ("rain", "shine"), + ("abcdef", "acbdef"), + ("language", "lnaguaeg"), + ("language", "lnaugage"), + ("language", "lngauage"), + ] + for s1, s2 in string_distance_examples: + print(f"Edit distance btwn '{s1}' and '{s2}':", edit_distance(s1, s2)) + print( + f"Edit dist with transpositions btwn '{s1}' and '{s2}':", + edit_distance(s1, s2, transpositions=True), + ) + print(f"Jaro similarity btwn '{s1}' and '{s2}':", jaro_similarity(s1, s2)) + print( + f"Jaro-Winkler similarity btwn '{s1}' and '{s2}':", + jaro_winkler_similarity(s1, s2), + ) + print( + f"Jaro-Winkler distance btwn '{s1}' and '{s2}':", + 1 - jaro_winkler_similarity(s1, s2), + ) + s1 = {1, 2, 3, 4} + s2 = {3, 4, 5} + print("s1:", s1) + print("s2:", s2) + print("Binary distance:", binary_distance(s1, s2)) + print("Jaccard distance:", jaccard_distance(s1, s2)) + print("MASI distance:", masi_distance(s1, s2)) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/paice.py b/venv/lib/python3.10/site-packages/nltk/metrics/paice.py new file mode 100644 index 0000000000000000000000000000000000000000..bf7de1930b61654f9120a2ec2cd5bf6ef090fc47 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/metrics/paice.py @@ -0,0 +1,389 @@ +# Natural Language Toolkit: Agreement Metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Lauri Hallila +# URL: +# For license information, see LICENSE.TXT +# + +"""Counts Paice's performance statistics for evaluating stemming algorithms. + +What is required: + - A dictionary of words grouped by their real lemmas + - A dictionary of words grouped by stems from a stemming algorithm + +When these are given, Understemming Index (UI), Overstemming Index (OI), +Stemming Weight (SW) and Error-rate relative to truncation (ERRT) are counted. + +References: +Chris D. Paice (1994). An evaluation method for stemming algorithms. +In Proceedings of SIGIR, 42--50. +""" + +from math import sqrt + + +def get_words_from_dictionary(lemmas): + """ + Get original set of words used for analysis. + + :param lemmas: A dictionary where keys are lemmas and values are sets + or lists of words corresponding to that lemma. + :type lemmas: dict(str): list(str) + :return: Set of words that exist as values in the dictionary + :rtype: set(str) + """ + words = set() + for lemma in lemmas: + words.update(set(lemmas[lemma])) + return words + + +def _truncate(words, cutlength): + """Group words by stems defined by truncating them at given length. + + :param words: Set of words used for analysis + :param cutlength: Words are stemmed by cutting at this length. + :type words: set(str) or list(str) + :type cutlength: int + :return: Dictionary where keys are stems and values are sets of words + corresponding to that stem. + :rtype: dict(str): set(str) + """ + stems = {} + for word in words: + stem = word[:cutlength] + try: + stems[stem].update([word]) + except KeyError: + stems[stem] = {word} + return stems + + +# Reference: https://en.wikipedia.org/wiki/Line-line_intersection +def _count_intersection(l1, l2): + """Count intersection between two line segments defined by coordinate pairs. 
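+
+    For example (illustrative, not a doctest): the two diagonals of the square
+    with corners (0, 0) and (2, 2) cross at its centre, so
+    ``_count_intersection(((0, 0), (2, 2)), ((0, 2), (2, 0)))`` returns (1.0, 1.0).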
+ + :param l1: Tuple of two coordinate pairs defining the first line segment + :param l2: Tuple of two coordinate pairs defining the second line segment + :type l1: tuple(float, float) + :type l2: tuple(float, float) + :return: Coordinates of the intersection + :rtype: tuple(float, float) + """ + x1, y1 = l1[0] + x2, y2 = l1[1] + x3, y3 = l2[0] + x4, y4 = l2[1] + + denominator = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4) + + if denominator == 0.0: # lines are parallel + if x1 == x2 == x3 == x4 == 0.0: + # When lines are parallel, they must be on the y-axis. + # We can ignore x-axis because we stop counting the + # truncation line when we get there. + # There are no other options as UI (x-axis) grows and + # OI (y-axis) diminishes when we go along the truncation line. + return (0.0, y4) + + x = ( + (x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4) + ) / denominator + y = ( + (x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4) + ) / denominator + return (x, y) + + +def _get_derivative(coordinates): + """Get derivative of the line from (0,0) to given coordinates. + + :param coordinates: A coordinate pair + :type coordinates: tuple(float, float) + :return: Derivative; inf if x is zero + :rtype: float + """ + try: + return coordinates[1] / coordinates[0] + except ZeroDivisionError: + return float("inf") + + +def _calculate_cut(lemmawords, stems): + """Count understemmed and overstemmed pairs for (lemma, stem) pair with common words. + + :param lemmawords: Set or list of words corresponding to certain lemma. + :param stems: A dictionary where keys are stems and values are sets + or lists of words corresponding to that stem. + :type lemmawords: set(str) or list(str) + :type stems: dict(str): set(str) + :return: Amount of understemmed and overstemmed pairs contributed by words + existing in both lemmawords and stems. + :rtype: tuple(float, float) + """ + umt, wmt = 0.0, 0.0 + for stem in stems: + cut = set(lemmawords) & set(stems[stem]) + if cut: + cutcount = len(cut) + stemcount = len(stems[stem]) + # Unachieved merge total + umt += cutcount * (len(lemmawords) - cutcount) + # Wrongly merged total + wmt += cutcount * (stemcount - cutcount) + return (umt, wmt) + + +def _calculate(lemmas, stems): + """Calculate actual and maximum possible amounts of understemmed and overstemmed word pairs. + + :param lemmas: A dictionary where keys are lemmas and values are sets + or lists of words corresponding to that lemma. + :param stems: A dictionary where keys are stems and values are sets + or lists of words corresponding to that stem. + :type lemmas: dict(str): list(str) + :type stems: dict(str): set(str) + :return: Global unachieved merge total (gumt), + global desired merge total (gdmt), + global wrongly merged total (gwmt) and + global desired non-merge total (gdnt). + :rtype: tuple(float, float, float, float) + """ + + n = sum(len(lemmas[word]) for word in lemmas) + + gdmt, gdnt, gumt, gwmt = (0.0, 0.0, 0.0, 0.0) + + for lemma in lemmas: + lemmacount = len(lemmas[lemma]) + + # Desired merge total + gdmt += lemmacount * (lemmacount - 1) + + # Desired non-merge total + gdnt += lemmacount * (n - lemmacount) + + # For each (lemma, stem) pair with common words, count how many + # pairs are understemmed and overstemmed. 
+ umt, wmt = _calculate_cut(lemmas[lemma], stems) + + # Add to total undesired and wrongly-merged totals + gumt += umt + gwmt += wmt + + # Each object is counted twice, so divide by two + return (gumt / 2, gdmt / 2, gwmt / 2, gdnt / 2) + + +def _indexes(gumt, gdmt, gwmt, gdnt): + """Count Understemming Index (UI), Overstemming Index (OI) and Stemming Weight (SW). + + :param gumt, gdmt, gwmt, gdnt: Global unachieved merge total (gumt), + global desired merge total (gdmt), + global wrongly merged total (gwmt) and + global desired non-merge total (gdnt). + :type gumt, gdmt, gwmt, gdnt: float + :return: Understemming Index (UI), + Overstemming Index (OI) and + Stemming Weight (SW). + :rtype: tuple(float, float, float) + """ + # Calculate Understemming Index (UI), + # Overstemming Index (OI) and Stemming Weight (SW) + try: + ui = gumt / gdmt + except ZeroDivisionError: + # If GDMT (max merge total) is 0, define UI as 0 + ui = 0.0 + try: + oi = gwmt / gdnt + except ZeroDivisionError: + # IF GDNT (max non-merge total) is 0, define OI as 0 + oi = 0.0 + try: + sw = oi / ui + except ZeroDivisionError: + if oi == 0.0: + # OI and UI are 0, define SW as 'not a number' + sw = float("nan") + else: + # UI is 0, define SW as infinity + sw = float("inf") + return (ui, oi, sw) + + +class Paice: + """Class for storing lemmas, stems and evaluation metrics.""" + + def __init__(self, lemmas, stems): + """ + :param lemmas: A dictionary where keys are lemmas and values are sets + or lists of words corresponding to that lemma. + :param stems: A dictionary where keys are stems and values are sets + or lists of words corresponding to that stem. + :type lemmas: dict(str): list(str) + :type stems: dict(str): set(str) + """ + self.lemmas = lemmas + self.stems = stems + self.coords = [] + self.gumt, self.gdmt, self.gwmt, self.gdnt = (None, None, None, None) + self.ui, self.oi, self.sw = (None, None, None) + self.errt = None + self.update() + + def __str__(self): + text = ["Global Unachieved Merge Total (GUMT): %s\n" % self.gumt] + text.append("Global Desired Merge Total (GDMT): %s\n" % self.gdmt) + text.append("Global Wrongly-Merged Total (GWMT): %s\n" % self.gwmt) + text.append("Global Desired Non-merge Total (GDNT): %s\n" % self.gdnt) + text.append("Understemming Index (GUMT / GDMT): %s\n" % self.ui) + text.append("Overstemming Index (GWMT / GDNT): %s\n" % self.oi) + text.append("Stemming Weight (OI / UI): %s\n" % self.sw) + text.append("Error-Rate Relative to Truncation (ERRT): %s\r\n" % self.errt) + coordinates = " ".join(["(%s, %s)" % item for item in self.coords]) + text.append("Truncation line: %s" % coordinates) + return "".join(text) + + def _get_truncation_indexes(self, words, cutlength): + """Count (UI, OI) when stemming is done by truncating words at \'cutlength\'. + + :param words: Words used for the analysis + :param cutlength: Words are stemmed by cutting them at this length + :type words: set(str) or list(str) + :type cutlength: int + :return: Understemming and overstemming indexes + :rtype: tuple(int, int) + """ + + truncated = _truncate(words, cutlength) + gumt, gdmt, gwmt, gdnt = _calculate(self.lemmas, truncated) + ui, oi = _indexes(gumt, gdmt, gwmt, gdnt)[:2] + return (ui, oi) + + def _get_truncation_coordinates(self, cutlength=0): + """Count (UI, OI) pairs for truncation points until we find the segment where (ui, oi) crosses the truncation line. + + :param cutlength: Optional parameter to start counting from (ui, oi) + coordinates gotten by stemming at this length. 
Useful for speeding up + the calculations when you know the approximate location of the + intersection. + :type cutlength: int + :return: List of coordinate pairs that define the truncation line + :rtype: list(tuple(float, float)) + """ + words = get_words_from_dictionary(self.lemmas) + maxlength = max(len(word) for word in words) + + # Truncate words from different points until (0, 0) - (ui, oi) segment crosses the truncation line + coords = [] + while cutlength <= maxlength: + # Get (UI, OI) pair of current truncation point + pair = self._get_truncation_indexes(words, cutlength) + + # Store only new coordinates so we'll have an actual + # line segment when counting the intersection point + if pair not in coords: + coords.append(pair) + if pair == (0.0, 0.0): + # Stop counting if truncation line goes through origo; + # length from origo to truncation line is 0 + return coords + if len(coords) >= 2 and pair[0] > 0.0: + derivative1 = _get_derivative(coords[-2]) + derivative2 = _get_derivative(coords[-1]) + # Derivative of the truncation line is a decreasing value; + # when it passes Stemming Weight, we've found the segment + # of truncation line intersecting with (0, 0) - (ui, oi) segment + if derivative1 >= self.sw >= derivative2: + return coords + cutlength += 1 + return coords + + def _errt(self): + """Count Error-Rate Relative to Truncation (ERRT). + + :return: ERRT, length of the line from origo to (UI, OI) divided by + the length of the line from origo to the point defined by the same + line when extended until the truncation line. + :rtype: float + """ + # Count (UI, OI) pairs for truncation points until we find the segment where (ui, oi) crosses the truncation line + self.coords = self._get_truncation_coordinates() + if (0.0, 0.0) in self.coords: + # Truncation line goes through origo, so ERRT cannot be counted + if (self.ui, self.oi) != (0.0, 0.0): + return float("inf") + else: + return float("nan") + if (self.ui, self.oi) == (0.0, 0.0): + # (ui, oi) is origo; define errt as 0.0 + return 0.0 + # Count the intersection point + # Note that (self.ui, self.oi) cannot be (0.0, 0.0) and self.coords has different coordinates + # so we have actual line segments instead of a line segment and a point + intersection = _count_intersection( + ((0, 0), (self.ui, self.oi)), self.coords[-2:] + ) + # Count OP (length of the line from origo to (ui, oi)) + op = sqrt(self.ui**2 + self.oi**2) + # Count OT (length of the line from origo to truncation line that goes through (ui, oi)) + ot = sqrt(intersection[0] ** 2 + intersection[1] ** 2) + # OP / OT tells how well the stemming algorithm works compared to just truncating words + return op / ot + + def update(self): + """Update statistics after lemmas and stems have been set.""" + self.gumt, self.gdmt, self.gwmt, self.gdnt = _calculate(self.lemmas, self.stems) + self.ui, self.oi, self.sw = _indexes(self.gumt, self.gdmt, self.gwmt, self.gdnt) + self.errt = self._errt() + + +def demo(): + """Demonstration of the module.""" + # Some words with their real lemmas + lemmas = { + "kneel": ["kneel", "knelt"], + "range": ["range", "ranged"], + "ring": ["ring", "rang", "rung"], + } + # Same words with stems from a stemming algorithm + stems = { + "kneel": ["kneel"], + "knelt": ["knelt"], + "rang": ["rang", "range", "ranged"], + "ring": ["ring"], + "rung": ["rung"], + } + print("Words grouped by their lemmas:") + for lemma in sorted(lemmas): + print("{} => {}".format(lemma, " ".join(lemmas[lemma]))) + print() + print("Same words grouped by a stemming algorithm:") + 
for stem in sorted(stems): + print("{} => {}".format(stem, " ".join(stems[stem]))) + print() + p = Paice(lemmas, stems) + print(p) + print() + # Let's "change" results from a stemming algorithm + stems = { + "kneel": ["kneel"], + "knelt": ["knelt"], + "rang": ["rang"], + "range": ["range", "ranged"], + "ring": ["ring"], + "rung": ["rung"], + } + print("Counting stats after changing stemming results:") + for stem in sorted(stems): + print("{} => {}".format(stem, " ".join(stems[stem]))) + print() + p.stems = stems + p.update() + print(p) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/scores.py b/venv/lib/python3.10/site-packages/nltk/metrics/scores.py new file mode 100644 index 0000000000000000000000000000000000000000..0d6d296aa62893788de65cdd0cdf3f5480a161f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/metrics/scores.py @@ -0,0 +1,228 @@ +# Natural Language Toolkit: Evaluation +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +import operator +from functools import reduce +from math import fabs +from random import shuffle + +try: + from scipy.stats.stats import betai +except ImportError: + betai = None + +from nltk.util import LazyConcatenation, LazyMap + + +def accuracy(reference, test): + """ + Given a list of reference values and a corresponding list of test + values, return the fraction of corresponding values that are + equal. In particular, return the fraction of indices + ``0= actual_stat: + c += 1 + + if verbose and i % 10 == 0: + print("pseudo-statistic: %f" % pseudo_stat) + print("significance: %f" % ((c + 1) / (i + 1))) + print("-" * 60) + + significance = (c + 1) / (shuffles + 1) + + if verbose: + print("significance: %f" % significance) + if betai: + for phi in [0.01, 0.05, 0.10, 0.15, 0.25, 0.50]: + print(f"prob(phi<={phi:f}): {betai(c, shuffles, phi):f}") + + return (significance, c, shuffles) + + +def demo(): + print("-" * 75) + reference = "DET NN VB DET JJ NN NN IN DET NN".split() + test = "DET VB VB DET NN NN NN IN DET NN".split() + print("Reference =", reference) + print("Test =", test) + print("Accuracy:", accuracy(reference, test)) + + print("-" * 75) + reference_set = set(reference) + test_set = set(test) + print("Reference =", reference_set) + print("Test = ", test_set) + print("Precision:", precision(reference_set, test_set)) + print(" Recall:", recall(reference_set, test_set)) + print("F-Measure:", f_measure(reference_set, test_set)) + print("-" * 75) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/segmentation.py b/venv/lib/python3.10/site-packages/nltk/metrics/segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..518197d35dff62ce5735b8e76fb5939b19ebedd2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/metrics/segmentation.py @@ -0,0 +1,222 @@ +# Natural Language Toolkit: Text Segmentation Metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# David Doukhan +# URL: +# For license information, see LICENSE.TXT + + +""" +Text Segmentation Metrics + +1. Windowdiff + +Pevzner, L., and Hearst, M., A Critique and Improvement of + an Evaluation Metric for Text Segmentation, + Computational Linguistics 28, 19-36 + + +2. Generalized Hamming Distance + +Bookstein A., Kulyukin V.A., Raita T. 
+Generalized Hamming Distance +Information Retrieval 5, 2002, pp 353-375 + +Baseline implementation in C++ +http://digital.cs.usu.edu/~vkulyukin/vkweb/software/ghd/ghd.html + +Study describing benefits of Generalized Hamming Distance Versus +WindowDiff for evaluating text segmentation tasks +Begsten, Y. Quel indice pour mesurer l'efficacite en segmentation de textes ? +TALN 2009 + + +3. Pk text segmentation metric + +Beeferman D., Berger A., Lafferty J. (1999) +Statistical Models for Text Segmentation +Machine Learning, 34, 177-210 +""" + +try: + import numpy as np +except ImportError: + pass + + +def windowdiff(seg1, seg2, k, boundary="1", weighted=False): + """ + Compute the windowdiff score for a pair of segmentations. A + segmentation is any sequence over a vocabulary of two items + (e.g. "0", "1"), where the specified boundary value is used to + mark the edge of a segmentation. + + >>> s1 = "000100000010" + >>> s2 = "000010000100" + >>> s3 = "100000010000" + >>> '%.2f' % windowdiff(s1, s1, 3) + '0.00' + >>> '%.2f' % windowdiff(s1, s2, 3) + '0.30' + >>> '%.2f' % windowdiff(s2, s3, 3) + '0.80' + + :param seg1: a segmentation + :type seg1: str or list + :param seg2: a segmentation + :type seg2: str or list + :param k: window width + :type k: int + :param boundary: boundary value + :type boundary: str or int or bool + :param weighted: use the weighted variant of windowdiff + :type weighted: boolean + :rtype: float + """ + + if len(seg1) != len(seg2): + raise ValueError("Segmentations have unequal length") + if k > len(seg1): + raise ValueError( + "Window width k should be smaller or equal than segmentation lengths" + ) + wd = 0 + for i in range(len(seg1) - k + 1): + ndiff = abs(seg1[i : i + k].count(boundary) - seg2[i : i + k].count(boundary)) + if weighted: + wd += ndiff + else: + wd += min(1, ndiff) + return wd / (len(seg1) - k + 1.0) + + +# Generalized Hamming Distance + + +def _init_mat(nrows, ncols, ins_cost, del_cost): + mat = np.empty((nrows, ncols)) + mat[0, :] = ins_cost * np.arange(ncols) + mat[:, 0] = del_cost * np.arange(nrows) + return mat + + +def _ghd_aux(mat, rowv, colv, ins_cost, del_cost, shift_cost_coeff): + for i, rowi in enumerate(rowv): + for j, colj in enumerate(colv): + shift_cost = shift_cost_coeff * abs(rowi - colj) + mat[i, j] + if rowi == colj: + # boundaries are at the same location, no transformation required + tcost = mat[i, j] + elif rowi > colj: + # boundary match through a deletion + tcost = del_cost + mat[i, j + 1] + else: + # boundary match through an insertion + tcost = ins_cost + mat[i + 1, j] + mat[i + 1, j + 1] = min(tcost, shift_cost) + + +def ghd(ref, hyp, ins_cost=2.0, del_cost=2.0, shift_cost_coeff=1.0, boundary="1"): + """ + Compute the Generalized Hamming Distance for a reference and a hypothetical + segmentation, corresponding to the cost related to the transformation + of the hypothetical segmentation into the reference segmentation + through boundary insertion, deletion and shift operations. + + A segmentation is any sequence over a vocabulary of two items + (e.g. "0", "1"), where the specified boundary value is used to + mark the edge of a segmentation. + + Recommended parameter values are a shift_cost_coeff of 2. + Associated with a ins_cost, and del_cost equal to the mean segment + length in the reference segmentation. 
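+    One hedged way to derive those recommended costs from a reference
+    segmentation (toy values; '1' marks a boundary)::
+
+        ref = "1000100010"
+        mean_seg_len = len(ref) / ref.count("1")   # ~3.33 here
+        # ghd(ref, hyp, ins_cost=mean_seg_len, del_cost=mean_seg_len,
+        #     shift_cost_coeff=2.0)
+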
+ + >>> # Same examples as Kulyukin C++ implementation + >>> ghd('1100100000', '1100010000', 1.0, 1.0, 0.5) + 0.5 + >>> ghd('1100100000', '1100000001', 1.0, 1.0, 0.5) + 2.0 + >>> ghd('011', '110', 1.0, 1.0, 0.5) + 1.0 + >>> ghd('1', '0', 1.0, 1.0, 0.5) + 1.0 + >>> ghd('111', '000', 1.0, 1.0, 0.5) + 3.0 + >>> ghd('000', '111', 1.0, 2.0, 0.5) + 6.0 + + :param ref: the reference segmentation + :type ref: str or list + :param hyp: the hypothetical segmentation + :type hyp: str or list + :param ins_cost: insertion cost + :type ins_cost: float + :param del_cost: deletion cost + :type del_cost: float + :param shift_cost_coeff: constant used to compute the cost of a shift. + ``shift cost = shift_cost_coeff * |i - j|`` where ``i`` and ``j`` + are the positions indicating the shift + :type shift_cost_coeff: float + :param boundary: boundary value + :type boundary: str or int or bool + :rtype: float + """ + + ref_idx = [i for (i, val) in enumerate(ref) if val == boundary] + hyp_idx = [i for (i, val) in enumerate(hyp) if val == boundary] + + nref_bound = len(ref_idx) + nhyp_bound = len(hyp_idx) + + if nref_bound == 0 and nhyp_bound == 0: + return 0.0 + elif nref_bound > 0 and nhyp_bound == 0: + return nref_bound * ins_cost + elif nref_bound == 0 and nhyp_bound > 0: + return nhyp_bound * del_cost + + mat = _init_mat(nhyp_bound + 1, nref_bound + 1, ins_cost, del_cost) + _ghd_aux(mat, hyp_idx, ref_idx, ins_cost, del_cost, shift_cost_coeff) + return mat[-1, -1] + + +# Beeferman's Pk text segmentation evaluation metric + + +def pk(ref, hyp, k=None, boundary="1"): + """ + Compute the Pk metric for a pair of segmentations A segmentation + is any sequence over a vocabulary of two items (e.g. "0", "1"), + where the specified boundary value is used to mark the edge of a + segmentation. + + >>> '%.2f' % pk('0100'*100, '1'*400, 2) + '0.50' + >>> '%.2f' % pk('0100'*100, '0'*400, 2) + '0.50' + >>> '%.2f' % pk('0100'*100, '0100'*100, 2) + '0.00' + + :param ref: the reference segmentation + :type ref: str or list + :param hyp: the segmentation to evaluate + :type hyp: str or list + :param k: window size, if None, set to half of the average reference segment length + :type boundary: str or int or bool + :param boundary: boundary value + :type boundary: str or int or bool + :rtype: float + """ + + if k is None: + k = int(round(len(ref) / (ref.count(boundary) * 2.0))) + + err = 0 + for i in range(len(ref) - k + 1): + r = ref[i : i + k].count(boundary) > 0 + h = hyp[i : i + k].count(boundary) > 0 + if r != h: + err += 1 + return err / (len(ref) - k + 1.0) diff --git a/venv/lib/python3.10/site-packages/nltk/metrics/spearman.py b/venv/lib/python3.10/site-packages/nltk/metrics/spearman.py new file mode 100644 index 0000000000000000000000000000000000000000..8b2987d371a2af218c7223e155b335315663fd2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/metrics/spearman.py @@ -0,0 +1,68 @@ +# Natural Language Toolkit: Spearman Rank Correlation +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Joel Nothman +# URL: +# For license information, see LICENSE.TXT + +""" +Tools for comparing ranked lists. +""" + + +def _rank_dists(ranks1, ranks2): + """Finds the difference between the values in ranks1 and ranks2 for keys + present in both dicts. If the arguments are not dicts, they are converted + from (key, rank) sequences. 
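+    For example, keys present in only one ranking are skipped:
+
+        >>> list(_rank_dists([("a", 1), ("b", 2), ("c", 3)], [("a", 2), ("b", 2)]))
+        [('a', -1), ('b', 0)]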
+ """ + ranks1 = dict(ranks1) + ranks2 = dict(ranks2) + for k in ranks1: + try: + yield k, ranks1[k] - ranks2[k] + except KeyError: + pass + + +def spearman_correlation(ranks1, ranks2): + """Returns the Spearman correlation coefficient for two rankings, which + should be dicts or sequences of (key, rank). The coefficient ranges from + -1.0 (ranks are opposite) to 1.0 (ranks are identical), and is only + calculated for keys in both rankings (for meaningful results, remove keys + present in only one list before ranking).""" + n = 0 + res = 0 + for k, d in _rank_dists(ranks1, ranks2): + res += d * d + n += 1 + try: + return 1 - (6 * res / (n * (n * n - 1))) + except ZeroDivisionError: + # Result is undefined if only one item is ranked + return 0.0 + + +def ranks_from_sequence(seq): + """Given a sequence, yields each element with an increasing rank, suitable + for use as an argument to ``spearman_correlation``. + """ + return ((k, i) for i, k in enumerate(seq)) + + +def ranks_from_scores(scores, rank_gap=1e-15): + """Given a sequence of (key, score) tuples, yields each key with an + increasing rank, tying with previous key's rank if the difference between + their scores is less than rank_gap. Suitable for use as an argument to + ``spearman_correlation``. + """ + prev_score = None + rank = 0 + for i, (key, score) in enumerate(scores): + try: + if abs(score - prev_score) > rank_gap: + rank = i + except TypeError: + pass + + yield key, rank + prev_score = score diff --git a/venv/lib/python3.10/site-packages/nltk/sentiment/__pycache__/sentiment_analyzer.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sentiment/__pycache__/sentiment_analyzer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..469e2ab042ff67fb73d4afe70a848f55b3132f4a Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sentiment/__pycache__/sentiment_analyzer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__init__.py b/venv/lib/python3.10/site-packages/nltk/tokenize/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5162796f751878d3521aaf66de56fac11b2a2dd8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/__init__.py @@ -0,0 +1,132 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# Contributors: matthewmc, clouds56 +# URL: +# For license information, see LICENSE.TXT + +r""" +NLTK Tokenizer Package + +Tokenizers divide strings into lists of substrings. For example, +tokenizers can be used to find the words and punctuation in a string: + + >>> from nltk.tokenize import word_tokenize + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me + ... two of them.\n\nThanks.''' + >>> word_tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.', + 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + +This particular tokenizer requires the Punkt sentence tokenization +models to be installed. 
NLTK also provides a simpler, +regular-expression based tokenizer, which splits text on whitespace +and punctuation: + + >>> from nltk.tokenize import wordpunct_tokenize + >>> wordpunct_tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', '.', + 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + +We can also operate at the level of sentences, using the sentence +tokenizer directly as follows: + + >>> from nltk.tokenize import sent_tokenize, word_tokenize + >>> sent_tokenize(s) + ['Good muffins cost $3.88\nin New York.', 'Please buy me\ntwo of them.', 'Thanks.'] + >>> [word_tokenize(t) for t in sent_tokenize(s)] # doctest: +NORMALIZE_WHITESPACE + [['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.'], + ['Please', 'buy', 'me', 'two', 'of', 'them', '.'], ['Thanks', '.']] + +Caution: when tokenizing a Unicode string, make sure you are not +using an encoded version of the string (it may be necessary to +decode it first, e.g. with ``s.decode("utf8")``. + +NLTK tokenizers can produce token-spans, represented as tuples of integers +having the same semantics as string slices, to support efficient comparison +of tokenizers. (These methods are implemented as generators.) + + >>> from nltk.tokenize import WhitespaceTokenizer + >>> list(WhitespaceTokenizer().span_tokenize(s)) # doctest: +NORMALIZE_WHITESPACE + [(0, 4), (5, 12), (13, 17), (18, 23), (24, 26), (27, 30), (31, 36), (38, 44), + (45, 48), (49, 51), (52, 55), (56, 58), (59, 64), (66, 73)] + +There are numerous ways to tokenize text. If you need more control over +tokenization, see the other methods provided in this package. + +For further information, please see Chapter 3 of the NLTK book. +""" + +import re + +from nltk.data import load +from nltk.tokenize.casual import TweetTokenizer, casual_tokenize +from nltk.tokenize.destructive import NLTKWordTokenizer +from nltk.tokenize.legality_principle import LegalitySyllableTokenizer +from nltk.tokenize.mwe import MWETokenizer +from nltk.tokenize.punkt import PunktSentenceTokenizer +from nltk.tokenize.regexp import ( + BlanklineTokenizer, + RegexpTokenizer, + WhitespaceTokenizer, + WordPunctTokenizer, + blankline_tokenize, + regexp_tokenize, + wordpunct_tokenize, +) +from nltk.tokenize.repp import ReppTokenizer +from nltk.tokenize.sexpr import SExprTokenizer, sexpr_tokenize +from nltk.tokenize.simple import ( + LineTokenizer, + SpaceTokenizer, + TabTokenizer, + line_tokenize, +) +from nltk.tokenize.sonority_sequencing import SyllableTokenizer +from nltk.tokenize.stanford_segmenter import StanfordSegmenter +from nltk.tokenize.texttiling import TextTilingTokenizer +from nltk.tokenize.toktok import ToktokTokenizer +from nltk.tokenize.treebank import TreebankWordDetokenizer, TreebankWordTokenizer +from nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize + + +# Standard sentence tokenizer. +def sent_tokenize(text, language="english"): + """ + Return a sentence-tokenized copy of *text*, + using NLTK's recommended sentence tokenizer + (currently :class:`.PunktSentenceTokenizer` + for the specified language). + + :param text: text to split into sentences + :param language: the model name in the Punkt corpus + """ + tokenizer = load(f"tokenizers/punkt/{language}.pickle") + return tokenizer.tokenize(text) + + +# Standard word tokenizer. 
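+# word_tokenize() below splits the input into sentences with sent_tokenize()
+# and then runs this NLTKWordTokenizer instance over each sentence; passing
+# preserve_line=True skips the sentence-splitting step.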
+_treebank_word_tokenizer = NLTKWordTokenizer() + + +def word_tokenize(text, language="english", preserve_line=False): + """ + Return a tokenized copy of *text*, + using NLTK's recommended word tokenizer + (currently an improved :class:`.TreebankWordTokenizer` + along with :class:`.PunktSentenceTokenizer` + for the specified language). + + :param text: text to split into words + :type text: str + :param language: the model name in the Punkt corpus + :type language: str + :param preserve_line: A flag to decide whether to sentence tokenize the text or not. + :type preserve_line: bool + """ + sentences = [text] if preserve_line else sent_tokenize(text, language) + return [ + token for sent in sentences for token in _treebank_word_tokenizer.tokenize(sent) + ] diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/legality_principle.py b/venv/lib/python3.10/site-packages/nltk/tokenize/legality_principle.py new file mode 100644 index 0000000000000000000000000000000000000000..547827cefe1af65209e1f44237b7ac160b167920 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/legality_principle.py @@ -0,0 +1,147 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Christopher Hench +# Alex Estes +# URL: +# For license information, see LICENSE.TXT + +""" +The Legality Principle is a language agnostic principle maintaining that syllable +onsets and codas (the beginning and ends of syllables not including the vowel) +are only legal if they are found as word onsets or codas in the language. The English +word ''admit'' must then be syllabified as ''ad-mit'' since ''dm'' is not found +word-initially in the English language (Bartlett et al.). This principle was first proposed +in Daniel Kahn's 1976 dissertation, ''Syllable-based generalizations in English phonology''. + +Kahn further argues that there is a ''strong tendency to syllabify in such a way that +initial clusters are of maximal length, consistent with the general constraints on +word-initial consonant clusters.'' Consequently, in addition to being legal onsets, +the longest legal onset is preferable---''Onset Maximization''. + +The default implementation assumes an English vowel set, but the `vowels` attribute +can be set to IPA or any other alphabet's vowel set for the use-case. +Both a valid set of vowels as well as a text corpus of words in the language +are necessary to determine legal onsets and subsequently syllabify words. + +The legality principle with onset maximization is a universal syllabification algorithm, +but that does not mean it performs equally across languages. Bartlett et al. (2009) +is a good benchmark for English accuracy if utilizing IPA (pg. 311). + +References: + +- Otto Jespersen. 1904. Lehrbuch der Phonetik. + Leipzig, Teubner. Chapter 13, Silbe, pp. 185-203. +- Theo Vennemann, ''On the Theory of Syllabic Phonology,'' 1972, p. 11. +- Daniel Kahn, ''Syllable-based generalizations in English phonology'', (PhD diss., MIT, 1976). +- Elisabeth Selkirk. 1984. On the major class features and syllable theory. + In Aronoff & Oehrle (eds.) Language Sound Structure: Studies in Phonology. + Cambridge, MIT Press. pp. 107-136. +- Jeremy Goslin and Ulrich Frauenfelder. 2001. A comparison of theoretical and human syllabification. Language and Speech, 44:409–436. +- Susan Bartlett, et al. 2009. On the Syllabification of Phonemes. + In HLT-NAACL. pp. 308-316. +- Christopher Hench. 2017. Resonances in Middle High German: New Methodologies in Prosody. UC Berkeley. 
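+
+A minimal sketch of the idea (illustrative only; the tokenizer class below does
+this properly over a full word list)::
+
+    from collections import Counter
+
+    vowels = set("aeiouy")
+
+    def onset(word):
+        cluster = ""
+        for ch in word.lower():
+            if ch in vowels:
+                break
+            cluster += ch
+        return cluster
+
+    corpus = ["admit", "dry", "drip", "mat", "it"]   # toy word list
+    freq = Counter(onset(w) for w in corpus)
+    legal_onsets = {o for o, c in freq.items() if c / len(corpus) > 0.001}
+    # "dm" never starts a word here, so "admit" syllabifies as "ad-mit".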
+""" + +from collections import Counter + +from nltk.tokenize.api import TokenizerI + + +class LegalitySyllableTokenizer(TokenizerI): + """ + Syllabifies words based on the Legality Principle and Onset Maximization. + + >>> from nltk.tokenize import LegalitySyllableTokenizer + >>> from nltk import word_tokenize + >>> from nltk.corpus import words + >>> text = "This is a wonderful sentence." + >>> text_words = word_tokenize(text) + >>> LP = LegalitySyllableTokenizer(words.words()) + >>> [LP.tokenize(word) for word in text_words] + [['This'], ['is'], ['a'], ['won', 'der', 'ful'], ['sen', 'ten', 'ce'], ['.']] + """ + + def __init__( + self, tokenized_source_text, vowels="aeiouy", legal_frequency_threshold=0.001 + ): + """ + :param tokenized_source_text: List of valid tokens in the language + :type tokenized_source_text: list(str) + :param vowels: Valid vowels in language or IPA representation + :type vowels: str + :param legal_frequency_threshold: Lowest frequency of all onsets to be considered a legal onset + :type legal_frequency_threshold: float + """ + self.legal_frequency_threshold = legal_frequency_threshold + self.vowels = vowels + self.legal_onsets = self.find_legal_onsets(tokenized_source_text) + + def find_legal_onsets(self, words): + """ + Gathers all onsets and then return only those above the frequency threshold + + :param words: List of words in a language + :type words: list(str) + :return: Set of legal onsets + :rtype: set(str) + """ + onsets = [self.onset(word) for word in words] + legal_onsets = [ + k + for k, v in Counter(onsets).items() + if (v / len(onsets)) > self.legal_frequency_threshold + ] + return set(legal_onsets) + + def onset(self, word): + """ + Returns consonant cluster of word, i.e. all characters until the first vowel. + + :param word: Single word or token + :type word: str + :return: String of characters of onset + :rtype: str + """ + onset = "" + for c in word.lower(): + if c in self.vowels: + return onset + else: + onset += c + return onset + + def tokenize(self, token): + """ + Apply the Legality Principle in combination with + Onset Maximization to return a list of syllables. + + :param token: Single word or token + :type token: str + :return syllable_list: Single word or token broken up into syllables. 
+ :rtype: list(str) + """ + syllables = [] + syllable, current_onset = "", "" + vowel, onset = False, False + for char in token[::-1]: + char_lower = char.lower() + if not vowel: + syllable += char + vowel = bool(char_lower in self.vowels) + else: + if char_lower + current_onset[::-1] in self.legal_onsets: + syllable += char + current_onset += char_lower + onset = True + elif char_lower in self.vowels and not onset: + syllable += char + current_onset += char_lower + else: + syllables.append(syllable) + syllable = char + current_onset = "" + vowel = bool(char_lower in self.vowels) + syllables.append(syllable) + syllables_ordered = [syllable[::-1] for syllable in syllables][::-1] + return syllables_ordered diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/mwe.py b/venv/lib/python3.10/site-packages/nltk/tokenize/mwe.py new file mode 100644 index 0000000000000000000000000000000000000000..c39244c7b1c7a9be96331548150c60ce9aaae8be --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/mwe.py @@ -0,0 +1,124 @@ +# Multi-Word Expression tokenizer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Rob Malouf +# URL: +# For license information, see LICENSE.TXT + +""" +Multi-Word Expression Tokenizer + +A ``MWETokenizer`` takes a string which has already been divided into tokens and +retokenizes it, merging multi-word expressions into single tokens, using a lexicon +of MWEs: + + + >>> from nltk.tokenize import MWETokenizer + + >>> tokenizer = MWETokenizer([('a', 'little'), ('a', 'little', 'bit'), ('a', 'lot')]) + >>> tokenizer.add_mwe(('in', 'spite', 'of')) + + >>> tokenizer.tokenize('Testing testing testing one two three'.split()) + ['Testing', 'testing', 'testing', 'one', 'two', 'three'] + + >>> tokenizer.tokenize('This is a test in spite'.split()) + ['This', 'is', 'a', 'test', 'in', 'spite'] + + >>> tokenizer.tokenize('In a little or a little bit or a lot in spite of'.split()) + ['In', 'a_little', 'or', 'a_little_bit', 'or', 'a_lot', 'in_spite_of'] + +""" +from nltk.tokenize.api import TokenizerI +from nltk.util import Trie + + +class MWETokenizer(TokenizerI): + """A tokenizer that processes tokenized text and merges multi-word expressions + into single tokens. + """ + + def __init__(self, mwes=None, separator="_"): + """Initialize the multi-word tokenizer with a list of expressions and a + separator + + :type mwes: list(list(str)) + :param mwes: A sequence of multi-word expressions to be merged, where + each MWE is a sequence of strings. + :type separator: str + :param separator: String that should be inserted between words in a multi-word + expression token. (Default is '_') + + """ + if not mwes: + mwes = [] + self._mwes = Trie(mwes) + self._separator = separator + + def add_mwe(self, mwe): + """Add a multi-word expression to the lexicon (stored as a word trie) + + We use ``util.Trie`` to represent the trie. Its form is a dict of dicts. + The key True marks the end of a valid MWE. 
+ + :param mwe: The multi-word expression we're adding into the word trie + :type mwe: tuple(str) or list(str) + + :Example: + + >>> tokenizer = MWETokenizer() + >>> tokenizer.add_mwe(('a', 'b')) + >>> tokenizer.add_mwe(('a', 'b', 'c')) + >>> tokenizer.add_mwe(('a', 'x')) + >>> expected = {'a': {'x': {True: None}, 'b': {True: None, 'c': {True: None}}}} + >>> tokenizer._mwes == expected + True + + """ + self._mwes.insert(mwe) + + def tokenize(self, text): + """ + + :param text: A list containing tokenized text + :type text: list(str) + :return: A list of the tokenized text with multi-words merged together + :rtype: list(str) + + :Example: + + >>> tokenizer = MWETokenizer([('hors', "d'oeuvre")], separator='+') + >>> tokenizer.tokenize("An hors d'oeuvre tonight, sir?".split()) + ['An', "hors+d'oeuvre", 'tonight,', 'sir?'] + + """ + i = 0 + n = len(text) + result = [] + + while i < n: + if text[i] in self._mwes: + # possible MWE match + j = i + trie = self._mwes + last_match = -1 + while j < n and text[j] in trie: # and len(trie[text[j]]) > 0 : + trie = trie[text[j]] + j = j + 1 + if Trie.LEAF in trie: + last_match = j + else: + if last_match > -1: + j = last_match + + if Trie.LEAF in trie or last_match > -1: + # success! + result.append(self._separator.join(text[i:j])) + i = j + else: + # no match, so backtrack + result.append(text[i]) + i += 1 + else: + result.append(text[i]) + i += 1 + return result diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/nist.py b/venv/lib/python3.10/site-packages/nltk/tokenize/nist.py new file mode 100644 index 0000000000000000000000000000000000000000..b9e13dad28b81d91891a838d89bcdf5a0c1ad086 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/nist.py @@ -0,0 +1,179 @@ +# Natural Language Toolkit: Python port of the mteval-v14.pl tokenizer. +# +# Copyright (C) 2001-2015 NLTK Project +# Author: Liling Tan (ported from ftp://jaguar.ncsl.nist.gov/mt/resources/mteval-v14.pl) +# Contributors: Ozan Caglayan, Wiktor Stribizew +# +# URL: +# For license information, see LICENSE.TXT + +""" +This is a NLTK port of the tokenizer used in the NIST BLEU evaluation script, +https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L926 +which was also ported into Python in +https://github.com/lium-lst/nmtpy/blob/master/nmtpy/metrics/mtevalbleu.py#L162 +""" + + +import io +import re + +from nltk.corpus import perluniprops +from nltk.tokenize.api import TokenizerI +from nltk.tokenize.util import xml_unescape + + +class NISTTokenizer(TokenizerI): + """ + This NIST tokenizer is sentence-based instead of the original + paragraph-based tokenization from mteval-14.pl; The sentence-based + tokenization is consistent with the other tokenizers available in NLTK. + + >>> from nltk.tokenize.nist import NISTTokenizer + >>> nist = NISTTokenizer() + >>> s = "Good muffins cost $3.88 in New York." + >>> expected_lower = [u'good', u'muffins', u'cost', u'$', u'3.88', u'in', u'new', u'york', u'.'] + >>> expected_cased = [u'Good', u'muffins', u'cost', u'$', u'3.88', u'in', u'New', u'York', u'.'] + >>> nist.tokenize(s, lowercase=False) == expected_cased + True + >>> nist.tokenize(s, lowercase=True) == expected_lower # Lowercased. + True + + The international_tokenize() is the preferred function when tokenizing + non-european text, e.g. + + >>> from nltk.tokenize.nist import NISTTokenizer + >>> nist = NISTTokenizer() + + # Input strings. + >>> albb = u'Alibaba Group Holding Limited (Chinese: 阿里巴巴集团控股 有限公司) us a Chinese e-commerce company...' 
+ >>> amz = u'Amazon.com, Inc. (/ˈæməzɒn/) is an American electronic commerce...' + >>> rkt = u'Rakuten, Inc. (楽天株式会社 Rakuten Kabushiki-gaisha) is a Japanese electronic commerce and Internet company based in Tokyo.' + + # Expected tokens. + >>> expected_albb = [u'Alibaba', u'Group', u'Holding', u'Limited', u'(', u'Chinese', u':', u'\u963f\u91cc\u5df4\u5df4\u96c6\u56e2\u63a7\u80a1', u'\u6709\u9650\u516c\u53f8', u')'] + >>> expected_amz = [u'Amazon', u'.', u'com', u',', u'Inc', u'.', u'(', u'/', u'\u02c8\xe6', u'm'] + >>> expected_rkt = [u'Rakuten', u',', u'Inc', u'.', u'(', u'\u697d\u5929\u682a\u5f0f\u4f1a\u793e', u'Rakuten', u'Kabushiki', u'-', u'gaisha'] + + >>> nist.international_tokenize(albb)[:10] == expected_albb + True + >>> nist.international_tokenize(amz)[:10] == expected_amz + True + >>> nist.international_tokenize(rkt)[:10] == expected_rkt + True + + # Doctest for patching issue #1926 + >>> sent = u'this is a foo\u2604sentence.' + >>> expected_sent = [u'this', u'is', u'a', u'foo', u'\u2604', u'sentence', u'.'] + >>> nist.international_tokenize(sent) == expected_sent + True + """ + + # Strip "skipped" tags + STRIP_SKIP = re.compile(""), "" + # Strip end-of-line hyphenation and join lines + STRIP_EOL_HYPHEN = re.compile("\u2028"), " " + # Tokenize punctuation. + PUNCT = re.compile(r"([\{-\~\[-\` -\&\(-\+\:-\@\/])"), " \\1 " + # Tokenize period and comma unless preceded by a digit. + PERIOD_COMMA_PRECEED = re.compile(r"([^0-9])([\.,])"), "\\1 \\2 " + # Tokenize period and comma unless followed by a digit. + PERIOD_COMMA_FOLLOW = re.compile(r"([\.,])([^0-9])"), " \\1 \\2" + # Tokenize dash when preceded by a digit + DASH_PRECEED_DIGIT = re.compile("([0-9])(-)"), "\\1 \\2 " + + LANG_DEPENDENT_REGEXES = [ + PUNCT, + PERIOD_COMMA_PRECEED, + PERIOD_COMMA_FOLLOW, + DASH_PRECEED_DIGIT, + ] + + # Perluniprops characters used in NIST tokenizer. + pup_number = str("".join(set(perluniprops.chars("Number")))) # i.e. \p{N} + pup_punct = str("".join(set(perluniprops.chars("Punctuation")))) # i.e. \p{P} + pup_symbol = str("".join(set(perluniprops.chars("Symbol")))) # i.e. \p{S} + + # Python regexes needs to escape some special symbols, see + # see https://stackoverflow.com/q/45670950/610569 + number_regex = re.sub(r"[]^\\-]", r"\\\g<0>", pup_number) + punct_regex = re.sub(r"[]^\\-]", r"\\\g<0>", pup_punct) + symbol_regex = re.sub(r"[]^\\-]", r"\\\g<0>", pup_symbol) + + # Note: In the original perl implementation, \p{Z} and \p{Zl} were used to + # (i) strip trailing and heading spaces and + # (ii) de-deuplicate spaces. + # In Python, this would do: ' '.join(str.strip().split()) + # Thus, the next two lines were commented out. + # Line_Separator = str(''.join(perluniprops.chars('Line_Separator'))) # i.e. \p{Zl} + # Separator = str(''.join(perluniprops.chars('Separator'))) # i.e. \p{Z} + + # Pads non-ascii strings with space. + NONASCII = re.compile("([\x00-\x7f]+)"), r" \1 " + # Tokenize any punctuation unless followed AND preceded by a digit. + PUNCT_1 = ( + re.compile(f"([{number_regex}])([{punct_regex}])"), + "\\1 \\2 ", + ) + PUNCT_2 = ( + re.compile(f"([{punct_regex}])([{number_regex}])"), + " \\1 \\2", + ) + # Tokenize symbols + SYMBOLS = re.compile(f"([{symbol_regex}])"), " \\1 " + + INTERNATIONAL_REGEXES = [NONASCII, PUNCT_1, PUNCT_2, SYMBOLS] + + def lang_independent_sub(self, text): + """Performs the language independent string substituitions.""" + # It's a strange order of regexes. 
+ # It'll be better to unescape after STRIP_EOL_HYPHEN + # but let's keep it close to the original NIST implementation. + regexp, substitution = self.STRIP_SKIP + text = regexp.sub(substitution, text) + text = xml_unescape(text) + regexp, substitution = self.STRIP_EOL_HYPHEN + text = regexp.sub(substitution, text) + return text + + def tokenize(self, text, lowercase=False, western_lang=True, return_str=False): + text = str(text) + # Language independent regex. + text = self.lang_independent_sub(text) + # Language dependent regex. + if western_lang: + # Pad string with whitespace. + text = " " + text + " " + if lowercase: + text = text.lower() + for regexp, substitution in self.LANG_DEPENDENT_REGEXES: + text = regexp.sub(substitution, text) + # Remove contiguous whitespaces. + text = " ".join(text.split()) + # Finally, strips heading and trailing spaces + # and converts output string into unicode. + text = str(text.strip()) + return text if return_str else text.split() + + def international_tokenize( + self, text, lowercase=False, split_non_ascii=True, return_str=False + ): + text = str(text) + # Different from the 'normal' tokenize(), STRIP_EOL_HYPHEN is applied + # first before unescaping. + regexp, substitution = self.STRIP_SKIP + text = regexp.sub(substitution, text) + regexp, substitution = self.STRIP_EOL_HYPHEN + text = regexp.sub(substitution, text) + text = xml_unescape(text) + + if lowercase: + text = text.lower() + + for regexp, substitution in self.INTERNATIONAL_REGEXES: + text = regexp.sub(substitution, text) + + # Make sure that there's only one space only between words. + # Strip leading and trailing spaces. + text = " ".join(text.strip().split()) + return text if return_str else text.split() diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/punkt.py b/venv/lib/python3.10/site-packages/nltk/tokenize/punkt.py new file mode 100644 index 0000000000000000000000000000000000000000..129bd49c270c301d97a44eec5e58d7e19f15cabe --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/punkt.py @@ -0,0 +1,1767 @@ +# Natural Language Toolkit: Punkt sentence tokenizer +# +# Copyright (C) 2001-2023 NLTK Project +# Algorithm: Kiss & Strunk (2006) +# Author: Willy (original Python port) +# Steven Bird (additions) +# Edward Loper (rewrite) +# Joel Nothman (almost rewrite) +# Arthur Darcet (fixes) +# Tom Aarsen <> (tackle ReDoS & performance issues) +# URL: +# For license information, see LICENSE.TXT + +r""" +Punkt Sentence Tokenizer + +This tokenizer divides a text into a list of sentences +by using an unsupervised algorithm to build a model for abbreviation +words, collocations, and words that start sentences. It must be +trained on a large collection of plaintext in the target language +before it can be used. + +The NLTK data package includes a pre-trained Punkt tokenizer for +English. + + >>> import nltk.data + >>> text = ''' + ... Punkt knows that the periods in Mr. Smith and Johann S. Bach + ... do not mark sentence boundaries. And sometimes sentences + ... can start with non-capitalized words. i is a good variable + ... name. + ... ''' + >>> sent_detector = nltk.data.load('tokenizers/punkt/english.pickle') + >>> print('\n-----\n'.join(sent_detector.tokenize(text.strip()))) + Punkt knows that the periods in Mr. Smith and Johann S. Bach + do not mark sentence boundaries. + ----- + And sometimes sentences + can start with non-capitalized words. + ----- + i is a good variable + name. 
+ +(Note that whitespace from the original text, including newlines, is +retained in the output.) + +Punctuation following sentences is also included by default +(from NLTK 3.0 onwards). It can be excluded with the realign_boundaries +flag. + + >>> text = ''' + ... (How does it deal with this parenthesis?) "It should be part of the + ... previous sentence." "(And the same with this one.)" ('And this one!') + ... "('(And (this)) '?)" [(and this. )] + ... ''' + >>> print('\n-----\n'.join( + ... sent_detector.tokenize(text.strip()))) + (How does it deal with this parenthesis?) + ----- + "It should be part of the + previous sentence." + ----- + "(And the same with this one.)" + ----- + ('And this one!') + ----- + "('(And (this)) '?)" + ----- + [(and this. )] + >>> print('\n-----\n'.join( + ... sent_detector.tokenize(text.strip(), realign_boundaries=False))) + (How does it deal with this parenthesis? + ----- + ) "It should be part of the + previous sentence. + ----- + " "(And the same with this one. + ----- + )" ('And this one! + ----- + ') + "('(And (this)) '? + ----- + )" [(and this. + ----- + )] + +However, Punkt is designed to learn parameters (a list of abbreviations, etc.) +unsupervised from a corpus similar to the target domain. The pre-packaged models +may therefore be unsuitable: use ``PunktSentenceTokenizer(text)`` to learn +parameters from the given text. + +:class:`.PunktTrainer` learns parameters such as a list of abbreviations +(without supervision) from portions of text. Using a ``PunktTrainer`` directly +allows for incremental training and modification of the hyper-parameters used +to decide what is considered an abbreviation, etc. + +The algorithm for this tokenizer is described in:: + + Kiss, Tibor and Strunk, Jan (2006): Unsupervised Multilingual Sentence + Boundary Detection. Computational Linguistics 32: 485-525. +""" + +# TODO: Make orthographic heuristic less susceptible to overtraining +# TODO: Frequent sentence starters optionally exclude always-capitalised words +# FIXME: Problem with ending string with e.g. '!!!' -> '!! !' + +import math +import re +import string +from collections import defaultdict +from typing import Any, Dict, Iterator, List, Match, Optional, Tuple, Union + +from nltk.probability import FreqDist +from nltk.tokenize.api import TokenizerI + +###################################################################### +# { Orthographic Context Constants +###################################################################### +# The following constants are used to describe the orthographic +# contexts in which a word can occur. BEG=beginning, MID=middle, +# UNK=unknown, UC=uppercase, LC=lowercase, NC=no case. 
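+# For example, a word type observed capitalised at the start of one sentence
+# and lower-cased in the middle of another accumulates the value
+# (_ORTHO_BEG_UC | _ORTHO_MID_LC) below via PunktParameters.add_ortho_context().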
+ +_ORTHO_BEG_UC = 1 << 1 +"""Orthographic context: beginning of a sentence with upper case.""" + +_ORTHO_MID_UC = 1 << 2 +"""Orthographic context: middle of a sentence with upper case.""" + +_ORTHO_UNK_UC = 1 << 3 +"""Orthographic context: unknown position in a sentence with upper case.""" + +_ORTHO_BEG_LC = 1 << 4 +"""Orthographic context: beginning of a sentence with lower case.""" + +_ORTHO_MID_LC = 1 << 5 +"""Orthographic context: middle of a sentence with lower case.""" + +_ORTHO_UNK_LC = 1 << 6 +"""Orthographic context: unknown position in a sentence with lower case.""" + +_ORTHO_UC = _ORTHO_BEG_UC + _ORTHO_MID_UC + _ORTHO_UNK_UC +"""Orthographic context: occurs with upper case.""" + +_ORTHO_LC = _ORTHO_BEG_LC + _ORTHO_MID_LC + _ORTHO_UNK_LC +"""Orthographic context: occurs with lower case.""" + +_ORTHO_MAP = { + ("initial", "upper"): _ORTHO_BEG_UC, + ("internal", "upper"): _ORTHO_MID_UC, + ("unknown", "upper"): _ORTHO_UNK_UC, + ("initial", "lower"): _ORTHO_BEG_LC, + ("internal", "lower"): _ORTHO_MID_LC, + ("unknown", "lower"): _ORTHO_UNK_LC, +} +"""A map from context position and first-letter case to the +appropriate orthographic context flag.""" + +# } (end orthographic context constants) +###################################################################### + +###################################################################### +# { Decision reasons for debugging +###################################################################### + +REASON_DEFAULT_DECISION = "default decision" +REASON_KNOWN_COLLOCATION = "known collocation (both words)" +REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC = "abbreviation + orthographic heuristic" +REASON_ABBR_WITH_SENTENCE_STARTER = "abbreviation + frequent sentence starter" +REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC = "initial + orthographic heuristic" +REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC = "initial + orthographic heuristic" +REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC = ( + "initial + special orthographic heuristic" +) + + +# } (end decision reasons for debugging) +###################################################################### + +###################################################################### +# { Language-dependent variables +###################################################################### + + +class PunktLanguageVars: + """ + Stores variables, mostly regular expressions, which may be + language-dependent for correct application of the algorithm. + An extension of this class may modify its properties to suit + a language other than English; an instance can then be passed + as an argument to PunktSentenceTokenizer and PunktTrainer + constructors. + """ + + __slots__ = ("_re_period_context", "_re_word_tokenizer") + + def __getstate__(self): + # All modifications to the class are performed by inheritance. + # Non-default parameters to be pickled must be defined in the inherited + # class. + return 1 + + def __setstate__(self, state): + return 1 + + sent_end_chars = (".", "?", "!") + """Characters which are candidates for sentence boundaries""" + + @property + def _re_sent_end_chars(self): + return "[%s]" % re.escape("".join(self.sent_end_chars)) + + internal_punctuation = ",:;" # might want to extend this.. 
+ """sentence internal punctuation, which indicates an abbreviation if + preceded by a period-final token.""" + + re_boundary_realignment = re.compile(r'["\')\]}]+?(?:\s+|(?=--)|$)', re.MULTILINE) + """Used to realign punctuation that should be included in a sentence + although it follows the period (or ?, !).""" + + _re_word_start = r"[^\(\"\`{\[:;&\#\*@\)}\]\-,]" + """Excludes some characters from starting word tokens""" + + @property + def _re_non_word_chars(self): + return r"(?:[)\";}\]\*:@\'\({\[%s])" % re.escape( + "".join(set(self.sent_end_chars) - {"."}) + ) + + """Characters that cannot appear within words""" + + _re_multi_char_punct = r"(?:\-{2,}|\.{2,}|(?:\.\s){2,}\.)" + """Hyphen and ellipsis are multi-character punctuation""" + + _word_tokenize_fmt = r"""( + %(MultiChar)s + | + (?=%(WordStart)s)\S+? # Accept word characters until end is found + (?= # Sequences marking a word's end + \s| # White-space + $| # End-of-string + %(NonWord)s|%(MultiChar)s| # Punctuation + ,(?=$|\s|%(NonWord)s|%(MultiChar)s) # Comma if at end of word + ) + | + \S + )""" + """Format of a regular expression to split punctuation from words, + excluding period.""" + + def _word_tokenizer_re(self): + """Compiles and returns a regular expression for word tokenization""" + try: + return self._re_word_tokenizer + except AttributeError: + self._re_word_tokenizer = re.compile( + self._word_tokenize_fmt + % { + "NonWord": self._re_non_word_chars, + "MultiChar": self._re_multi_char_punct, + "WordStart": self._re_word_start, + }, + re.UNICODE | re.VERBOSE, + ) + return self._re_word_tokenizer + + def word_tokenize(self, s): + """Tokenize a string to split off punctuation other than periods""" + return self._word_tokenizer_re().findall(s) + + _period_context_fmt = r""" + %(SentEndChars)s # a potential sentence ending + (?=(?P + %(NonWord)s # either other punctuation + | + \s+(?P\S+) # or whitespace and some other token + ))""" + """Format of a regular expression to find contexts including possible + sentence boundaries. Matches token which the possible sentence boundary + ends, and matches the following token within a lookahead expression.""" + + def period_context_re(self): + """Compiles and returns a regular expression to find contexts + including possible sentence boundaries.""" + try: + return self._re_period_context + except: + self._re_period_context = re.compile( + self._period_context_fmt + % { + "NonWord": self._re_non_word_chars, + "SentEndChars": self._re_sent_end_chars, + }, + re.UNICODE | re.VERBOSE, + ) + return self._re_period_context + + +_re_non_punct = re.compile(r"[^\W\d]", re.UNICODE) +"""Matches token types that are not merely punctuation. (Types for +numeric tokens are changed to ##number## and hence contain alpha.)""" + + +# } +###################################################################### + + +# //////////////////////////////////////////////////////////// +# { Helper Functions +# //////////////////////////////////////////////////////////// + + +def _pair_iter(iterator): + """ + Yields pairs of tokens from the given iterator such that each input + token will appear as the first element in a yielded tuple. The last + pair will have None as its second element. 
+ """ + iterator = iter(iterator) + try: + prev = next(iterator) + except StopIteration: + return + for el in iterator: + yield (prev, el) + prev = el + yield (prev, None) + + +###################################################################### +# { Punkt Parameters +###################################################################### + + +class PunktParameters: + """Stores data used to perform sentence boundary detection with Punkt.""" + + def __init__(self): + self.abbrev_types = set() + """A set of word types for known abbreviations.""" + + self.collocations = set() + """A set of word type tuples for known common collocations + where the first word ends in a period. E.g., ('S.', 'Bach') + is a common collocation in a text that discusses 'Johann + S. Bach'. These count as negative evidence for sentence + boundaries.""" + + self.sent_starters = set() + """A set of word types for words that often appear at the + beginning of sentences.""" + + self.ortho_context = defaultdict(int) + """A dictionary mapping word types to the set of orthographic + contexts that word type appears in. Contexts are represented + by adding orthographic context flags: ...""" + + def clear_abbrevs(self): + self.abbrev_types = set() + + def clear_collocations(self): + self.collocations = set() + + def clear_sent_starters(self): + self.sent_starters = set() + + def clear_ortho_context(self): + self.ortho_context = defaultdict(int) + + def add_ortho_context(self, typ, flag): + self.ortho_context[typ] |= flag + + def _debug_ortho_context(self, typ): + context = self.ortho_context[typ] + if context & _ORTHO_BEG_UC: + yield "BEG-UC" + if context & _ORTHO_MID_UC: + yield "MID-UC" + if context & _ORTHO_UNK_UC: + yield "UNK-UC" + if context & _ORTHO_BEG_LC: + yield "BEG-LC" + if context & _ORTHO_MID_LC: + yield "MID-LC" + if context & _ORTHO_UNK_LC: + yield "UNK-LC" + + +###################################################################### +# { PunktToken +###################################################################### + + +class PunktToken: + """Stores a token of text with annotations produced during + sentence boundary detection.""" + + _properties = ["parastart", "linestart", "sentbreak", "abbr", "ellipsis"] + __slots__ = ["tok", "type", "period_final"] + _properties + + def __init__(self, tok, **params): + self.tok = tok + self.type = self._get_type(tok) + self.period_final = tok.endswith(".") + + for prop in self._properties: + setattr(self, prop, None) + for k in params: + setattr(self, k, params[k]) + + # //////////////////////////////////////////////////////////// + # { Regular expressions for properties + # //////////////////////////////////////////////////////////// + # Note: [A-Za-z] is approximated by [^\W\d] in the general case. + _RE_ELLIPSIS = re.compile(r"\.\.+$") + _RE_NUMERIC = re.compile(r"^-?[\.,]?\d[\d,\.-]*\.?$") + _RE_INITIAL = re.compile(r"[^\W\d]\.$", re.UNICODE) + _RE_ALPHA = re.compile(r"[^\W\d]+$", re.UNICODE) + + # //////////////////////////////////////////////////////////// + # { Derived properties + # //////////////////////////////////////////////////////////// + + def _get_type(self, tok): + """Returns a case-normalized representation of the token.""" + return self._RE_NUMERIC.sub("##number##", tok.lower()) + + @property + def type_no_period(self): + """ + The type with its final period removed if it has one. 
+ """ + if len(self.type) > 1 and self.type[-1] == ".": + return self.type[:-1] + return self.type + + @property + def type_no_sentperiod(self): + """ + The type with its final period removed if it is marked as a + sentence break. + """ + if self.sentbreak: + return self.type_no_period + return self.type + + @property + def first_upper(self): + """True if the token's first character is uppercase.""" + return self.tok[0].isupper() + + @property + def first_lower(self): + """True if the token's first character is lowercase.""" + return self.tok[0].islower() + + @property + def first_case(self): + if self.first_lower: + return "lower" + if self.first_upper: + return "upper" + return "none" + + @property + def is_ellipsis(self): + """True if the token text is that of an ellipsis.""" + return self._RE_ELLIPSIS.match(self.tok) + + @property + def is_number(self): + """True if the token text is that of a number.""" + return self.type.startswith("##number##") + + @property + def is_initial(self): + """True if the token text is that of an initial.""" + return self._RE_INITIAL.match(self.tok) + + @property + def is_alpha(self): + """True if the token text is all alphabetic.""" + return self._RE_ALPHA.match(self.tok) + + @property + def is_non_punct(self): + """True if the token is either a number or is alphabetic.""" + return _re_non_punct.search(self.type) + + # //////////////////////////////////////////////////////////// + # { String representation + # //////////////////////////////////////////////////////////// + + def __repr__(self): + """ + A string representation of the token that can reproduce it + with eval(), which lists all the token's non-default + annotations. + """ + typestr = " type=%s," % repr(self.type) if self.type != self.tok else "" + + propvals = ", ".join( + f"{p}={repr(getattr(self, p))}" + for p in self._properties + if getattr(self, p) + ) + + return "{}({},{} {})".format( + self.__class__.__name__, + repr(self.tok), + typestr, + propvals, + ) + + def __str__(self): + """ + A string representation akin to that used by Kiss and Strunk. + """ + res = self.tok + if self.abbr: + res += "" + if self.ellipsis: + res += "" + if self.sentbreak: + res += "" + return res + + +###################################################################### +# { Punkt base class +###################################################################### + + +class PunktBaseClass: + """ + Includes common components of PunktTrainer and PunktSentenceTokenizer. + """ + + def __init__(self, lang_vars=None, token_cls=PunktToken, params=None): + if lang_vars is None: + lang_vars = PunktLanguageVars() + if params is None: + params = PunktParameters() + self._params = params + self._lang_vars = lang_vars + self._Token = token_cls + """The collection of parameters that determines the behavior + of the punkt tokenizer.""" + + # //////////////////////////////////////////////////////////// + # { Word tokenization + # //////////////////////////////////////////////////////////// + + def _tokenize_words(self, plaintext): + """ + Divide the given text into tokens, using the punkt word + segmentation regular expression, and generate the resulting list + of tokens augmented as three-tuples with two boolean values for whether + the given token occurs at the start of a paragraph or a new line, + respectively. 
+ """ + parastart = False + for line in plaintext.split("\n"): + if line.strip(): + line_toks = iter(self._lang_vars.word_tokenize(line)) + + try: + tok = next(line_toks) + except StopIteration: + continue + + yield self._Token(tok, parastart=parastart, linestart=True) + parastart = False + + for tok in line_toks: + yield self._Token(tok) + else: + parastart = True + + # //////////////////////////////////////////////////////////// + # { Annotation Procedures + # //////////////////////////////////////////////////////////// + + def _annotate_first_pass( + self, tokens: Iterator[PunktToken] + ) -> Iterator[PunktToken]: + """ + Perform the first pass of annotation, which makes decisions + based purely based on the word type of each word: + + - '?', '!', and '.' are marked as sentence breaks. + - sequences of two or more periods are marked as ellipsis. + - any word ending in '.' that's a known abbreviation is + marked as an abbreviation. + - any other word ending in '.' is marked as a sentence break. + + Return these annotations as a tuple of three sets: + + - sentbreak_toks: The indices of all sentence breaks. + - abbrev_toks: The indices of all abbreviations. + - ellipsis_toks: The indices of all ellipsis marks. + """ + for aug_tok in tokens: + self._first_pass_annotation(aug_tok) + yield aug_tok + + def _first_pass_annotation(self, aug_tok: PunktToken) -> None: + """ + Performs type-based annotation on a single token. + """ + + tok = aug_tok.tok + + if tok in self._lang_vars.sent_end_chars: + aug_tok.sentbreak = True + elif aug_tok.is_ellipsis: + aug_tok.ellipsis = True + elif aug_tok.period_final and not tok.endswith(".."): + if ( + tok[:-1].lower() in self._params.abbrev_types + or tok[:-1].lower().split("-")[-1] in self._params.abbrev_types + ): + + aug_tok.abbr = True + else: + aug_tok.sentbreak = True + + return + + +###################################################################### +# { Punkt Trainer +###################################################################### + + +class PunktTrainer(PunktBaseClass): + """Learns parameters used in Punkt sentence boundary detection.""" + + def __init__( + self, train_text=None, verbose=False, lang_vars=None, token_cls=PunktToken + ): + + PunktBaseClass.__init__(self, lang_vars=lang_vars, token_cls=token_cls) + + self._type_fdist = FreqDist() + """A frequency distribution giving the frequency of each + case-normalized token type in the training data.""" + + self._num_period_toks = 0 + """The number of words ending in period in the training data.""" + + self._collocation_fdist = FreqDist() + """A frequency distribution giving the frequency of all + bigrams in the training data where the first word ends in a + period. Bigrams are encoded as tuples of word types. + Especially common collocations are extracted from this + frequency distribution, and stored in + ``_params``.``collocations ``.""" + + self._sent_starter_fdist = FreqDist() + """A frequency distribution giving the frequency of all words + that occur at the training data at the beginning of a sentence + (after the first pass of annotation). Especially common + sentence starters are extracted from this frequency + distribution, and stored in ``_params.sent_starters``. 
+ """ + + self._sentbreak_count = 0 + """The total number of sentence breaks identified in training, used for + calculating the frequent sentence starter heuristic.""" + + self._finalized = True + """A flag as to whether the training has been finalized by finding + collocations and sentence starters, or whether finalize_training() + still needs to be called.""" + + if train_text: + self.train(train_text, verbose, finalize=True) + + def get_params(self): + """ + Calculates and returns parameters for sentence boundary detection as + derived from training.""" + if not self._finalized: + self.finalize_training() + return self._params + + # //////////////////////////////////////////////////////////// + # { Customization Variables + # //////////////////////////////////////////////////////////// + + ABBREV = 0.3 + """cut-off value whether a 'token' is an abbreviation""" + + IGNORE_ABBREV_PENALTY = False + """allows the disabling of the abbreviation penalty heuristic, which + exponentially disadvantages words that are found at times without a + final period.""" + + ABBREV_BACKOFF = 5 + """upper cut-off for Mikheev's(2002) abbreviation detection algorithm""" + + COLLOCATION = 7.88 + """minimal log-likelihood value that two tokens need to be considered + as a collocation""" + + SENT_STARTER = 30 + """minimal log-likelihood value that a token requires to be considered + as a frequent sentence starter""" + + INCLUDE_ALL_COLLOCS = False + """this includes as potential collocations all word pairs where the first + word ends in a period. It may be useful in corpora where there is a lot + of variation that makes abbreviations like Mr difficult to identify.""" + + INCLUDE_ABBREV_COLLOCS = False + """this includes as potential collocations all word pairs where the first + word is an abbreviation. Such collocations override the orthographic + heuristic, but not the sentence starter heuristic. This is overridden by + INCLUDE_ALL_COLLOCS, and if both are false, only collocations with initials + and ordinals are considered.""" + """""" + + MIN_COLLOC_FREQ = 1 + """this sets a minimum bound on the number of times a bigram needs to + appear before it can be considered a collocation, in addition to log + likelihood statistics. This is useful when INCLUDE_ALL_COLLOCS is True.""" + + # //////////////////////////////////////////////////////////// + # { Training.. + # //////////////////////////////////////////////////////////// + + def train(self, text, verbose=False, finalize=True): + """ + Collects training data from a given text. If finalize is True, it + will determine all the parameters for sentence boundary detection. If + not, this will be delayed until get_params() or finalize_training() is + called. If verbose is True, abbreviations found will be listed. + """ + # Break the text into tokens; record which token indices correspond to + # line starts and paragraph starts; and determine their types. + self._train_tokens(self._tokenize_words(text), verbose) + if finalize: + self.finalize_training(verbose) + + def train_tokens(self, tokens, verbose=False, finalize=True): + """ + Collects training data from a given list of tokens. + """ + self._train_tokens((self._Token(t) for t in tokens), verbose) + if finalize: + self.finalize_training(verbose) + + def _train_tokens(self, tokens, verbose): + self._finalized = False + + # Ensure tokens are a list + tokens = list(tokens) + + # Find the frequency of each case-normalized type. (Don't + # strip off final periods.) 
Also keep track of the number of + # tokens that end in periods. + for aug_tok in tokens: + self._type_fdist[aug_tok.type] += 1 + if aug_tok.period_final: + self._num_period_toks += 1 + + # Look for new abbreviations, and for types that no longer are + unique_types = self._unique_types(tokens) + for abbr, score, is_add in self._reclassify_abbrev_types(unique_types): + if score >= self.ABBREV: + if is_add: + self._params.abbrev_types.add(abbr) + if verbose: + print(f" Abbreviation: [{score:6.4f}] {abbr}") + else: + if not is_add: + self._params.abbrev_types.remove(abbr) + if verbose: + print(f" Removed abbreviation: [{score:6.4f}] {abbr}") + + # Make a preliminary pass through the document, marking likely + # sentence breaks, abbreviations, and ellipsis tokens. + tokens = list(self._annotate_first_pass(tokens)) + + # Check what contexts each word type can appear in, given the + # case of its first letter. + self._get_orthography_data(tokens) + + # We need total number of sentence breaks to find sentence starters + self._sentbreak_count += self._get_sentbreak_count(tokens) + + # The remaining heuristics relate to pairs of tokens where the first + # ends in a period. + for aug_tok1, aug_tok2 in _pair_iter(tokens): + if not aug_tok1.period_final or not aug_tok2: + continue + + # Is the first token a rare abbreviation? + if self._is_rare_abbrev_type(aug_tok1, aug_tok2): + self._params.abbrev_types.add(aug_tok1.type_no_period) + if verbose: + print(" Rare Abbrev: %s" % aug_tok1.type) + + # Does second token have a high likelihood of starting a sentence? + if self._is_potential_sent_starter(aug_tok2, aug_tok1): + self._sent_starter_fdist[aug_tok2.type] += 1 + + # Is this bigram a potential collocation? + if self._is_potential_collocation(aug_tok1, aug_tok2): + self._collocation_fdist[ + (aug_tok1.type_no_period, aug_tok2.type_no_sentperiod) + ] += 1 + + def _unique_types(self, tokens): + return {aug_tok.type for aug_tok in tokens} + + def finalize_training(self, verbose=False): + """ + Uses data that has been gathered in training to determine likely + collocations and sentence starters. + """ + self._params.clear_sent_starters() + for typ, log_likelihood in self._find_sent_starters(): + self._params.sent_starters.add(typ) + if verbose: + print(f" Sent Starter: [{log_likelihood:6.4f}] {typ!r}") + + self._params.clear_collocations() + for (typ1, typ2), log_likelihood in self._find_collocations(): + self._params.collocations.add((typ1, typ2)) + if verbose: + print(f" Collocation: [{log_likelihood:6.4f}] {typ1!r}+{typ2!r}") + + self._finalized = True + + # //////////////////////////////////////////////////////////// + # { Overhead reduction + # //////////////////////////////////////////////////////////// + + def freq_threshold( + self, ortho_thresh=2, type_thresh=2, colloc_thres=2, sentstart_thresh=2 + ): + """ + Allows memory use to be reduced after much training by removing data + about rare tokens that are unlikely to have a statistical effect with + further training. Entries occurring above the given thresholds will be + retained. 
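+
+        A hedged sketch of the intended pattern (``part_1``/``part_2`` are
+        hypothetical plaintext strings)::
+
+            trainer = PunktTrainer()
+            trainer.train(part_1, finalize=False)
+            trainer.freq_threshold()       # prune rare counts between batches
+            trainer.train(part_2, finalize=False)
+            params = trainer.get_params()  # finalizes and returns parameters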
+ """ + if ortho_thresh > 1: + old_oc = self._params.ortho_context + self._params.clear_ortho_context() + for tok in self._type_fdist: + count = self._type_fdist[tok] + if count >= ortho_thresh: + self._params.ortho_context[tok] = old_oc[tok] + + self._type_fdist = self._freq_threshold(self._type_fdist, type_thresh) + self._collocation_fdist = self._freq_threshold( + self._collocation_fdist, colloc_thres + ) + self._sent_starter_fdist = self._freq_threshold( + self._sent_starter_fdist, sentstart_thresh + ) + + def _freq_threshold(self, fdist, threshold): + """ + Returns a FreqDist containing only data with counts below a given + threshold, as well as a mapping (None -> count_removed). + """ + # We assume that there is more data below the threshold than above it + # and so create a new FreqDist rather than working in place. + res = FreqDist() + num_removed = 0 + for tok in fdist: + count = fdist[tok] + if count < threshold: + num_removed += 1 + else: + res[tok] += count + res[None] += num_removed + return res + + # //////////////////////////////////////////////////////////// + # { Orthographic data + # //////////////////////////////////////////////////////////// + + def _get_orthography_data(self, tokens): + """ + Collect information about whether each token type occurs + with different case patterns (i) overall, (ii) at + sentence-initial positions, and (iii) at sentence-internal + positions. + """ + # 'initial' or 'internal' or 'unknown' + context = "internal" + tokens = list(tokens) + + for aug_tok in tokens: + # If we encounter a paragraph break, then it's a good sign + # that it's a sentence break. But err on the side of + # caution (by not positing a sentence break) if we just + # saw an abbreviation. + if aug_tok.parastart and context != "unknown": + context = "initial" + + # If we're at the beginning of a line, then we can't decide + # between 'internal' and 'initial'. + if aug_tok.linestart and context == "internal": + context = "unknown" + + # Find the case-normalized type of the token. If it's a + # sentence-final token, strip off the period. + typ = aug_tok.type_no_sentperiod + + # Update the orthographic context table. + flag = _ORTHO_MAP.get((context, aug_tok.first_case), 0) + if flag: + self._params.add_ortho_context(typ, flag) + + # Decide whether the next word is at a sentence boundary. + if aug_tok.sentbreak: + if not (aug_tok.is_number or aug_tok.is_initial): + context = "initial" + else: + context = "unknown" + elif aug_tok.ellipsis or aug_tok.abbr: + context = "unknown" + else: + context = "internal" + + # //////////////////////////////////////////////////////////// + # { Abbreviations + # //////////////////////////////////////////////////////////// + + def _reclassify_abbrev_types(self, types): + """ + (Re)classifies each given token if + - it is period-final and not a known abbreviation; or + - it is not period-final and is otherwise a known abbreviation + by checking whether its previous classification still holds according + to the heuristics of section 3. + Yields triples (abbr, score, is_add) where abbr is the type in question, + score is its log-likelihood with penalties applied, and is_add specifies + whether the present type is a candidate for inclusion or exclusion as an + abbreviation, such that: + - (is_add and score >= 0.3) suggests a new abbreviation; and + - (not is_add and score < 0.3) suggests excluding an abbreviation. 
+ """ + # (While one could recalculate abbreviations from all .-final tokens at + # every iteration, in cases requiring efficiency, the number of tokens + # in the present training document will be much less.) + + for typ in types: + # Check some basic conditions, to rule out words that are + # clearly not abbrev_types. + if not _re_non_punct.search(typ) or typ == "##number##": + continue + + if typ.endswith("."): + if typ in self._params.abbrev_types: + continue + typ = typ[:-1] + is_add = True + else: + if typ not in self._params.abbrev_types: + continue + is_add = False + + # Count how many periods & nonperiods are in the + # candidate. + num_periods = typ.count(".") + 1 + num_nonperiods = len(typ) - num_periods + 1 + + # Let be the candidate without the period, and + # be the period. Find a log likelihood ratio that + # indicates whether occurs as a single unit (high + # value of log_likelihood), or as two independent units and + # (low value of log_likelihood). + count_with_period = self._type_fdist[typ + "."] + count_without_period = self._type_fdist[typ] + log_likelihood = self._dunning_log_likelihood( + count_with_period + count_without_period, + self._num_period_toks, + count_with_period, + self._type_fdist.N(), + ) + + # Apply three scaling factors to 'tweak' the basic log + # likelihood ratio: + # F_length: long word -> less likely to be an abbrev + # F_periods: more periods -> more likely to be an abbrev + # F_penalty: penalize occurrences w/o a period + f_length = math.exp(-num_nonperiods) + f_periods = num_periods + f_penalty = int(self.IGNORE_ABBREV_PENALTY) or math.pow( + num_nonperiods, -count_without_period + ) + score = log_likelihood * f_length * f_periods * f_penalty + + yield typ, score, is_add + + def find_abbrev_types(self): + """ + Recalculates abbreviations given type frequencies, despite no prior + determination of abbreviations. + This fails to include abbreviations otherwise found as "rare". + """ + self._params.clear_abbrevs() + tokens = (typ for typ in self._type_fdist if typ and typ.endswith(".")) + for abbr, score, _is_add in self._reclassify_abbrev_types(tokens): + if score >= self.ABBREV: + self._params.abbrev_types.add(abbr) + + # This function combines the work done by the original code's + # functions `count_orthography_context`, `get_orthography_count`, + # and `get_rare_abbreviations`. + def _is_rare_abbrev_type(self, cur_tok, next_tok): + """ + A word type is counted as a rare abbreviation if... + - it's not already marked as an abbreviation + - it occurs fewer than ABBREV_BACKOFF times + - either it is followed by a sentence-internal punctuation + mark, *or* it is followed by a lower-case word that + sometimes appears with upper case, but never occurs with + lower case at the beginning of sentences. + """ + if cur_tok.abbr or not cur_tok.sentbreak: + return False + + # Find the case-normalized type of the token. If it's + # a sentence-final token, strip off the period. + typ = cur_tok.type_no_sentperiod + + # Proceed only if the type hasn't been categorized as an + # abbreviation already, and is sufficiently rare... + count = self._type_fdist[typ] + self._type_fdist[typ[:-1]] + if typ in self._params.abbrev_types or count >= self.ABBREV_BACKOFF: + return False + + # Record this token as an abbreviation if the next + # token is a sentence-internal punctuation mark. + # [XX] :1 or check the whole thing?? + if next_tok.tok[:1] in self._lang_vars.internal_punctuation: + return True + + # Record this type as an abbreviation if the next + # token... 
(i) starts with a lower case letter, + # (ii) sometimes occurs with an uppercase letter, + # and (iii) never occus with an uppercase letter + # sentence-internally. + # [xx] should the check for (ii) be modified?? + if next_tok.first_lower: + typ2 = next_tok.type_no_sentperiod + typ2ortho_context = self._params.ortho_context[typ2] + if (typ2ortho_context & _ORTHO_BEG_UC) and not ( + typ2ortho_context & _ORTHO_MID_UC + ): + return True + + # //////////////////////////////////////////////////////////// + # { Log Likelihoods + # //////////////////////////////////////////////////////////// + + # helper for _reclassify_abbrev_types: + @staticmethod + def _dunning_log_likelihood(count_a, count_b, count_ab, N): + """ + A function that calculates the modified Dunning log-likelihood + ratio scores for abbreviation candidates. The details of how + this works is available in the paper. + """ + p1 = count_b / N + p2 = 0.99 + + null_hypo = count_ab * math.log(p1) + (count_a - count_ab) * math.log(1.0 - p1) + alt_hypo = count_ab * math.log(p2) + (count_a - count_ab) * math.log(1.0 - p2) + + likelihood = null_hypo - alt_hypo + + return -2.0 * likelihood + + @staticmethod + def _col_log_likelihood(count_a, count_b, count_ab, N): + """ + A function that will just compute log-likelihood estimate, in + the original paper it's described in algorithm 6 and 7. + + This *should* be the original Dunning log-likelihood values, + unlike the previous log_l function where it used modified + Dunning log-likelihood values + """ + p = count_b / N + p1 = count_ab / count_a + try: + p2 = (count_b - count_ab) / (N - count_a) + except ZeroDivisionError: + p2 = 1 + + try: + summand1 = count_ab * math.log(p) + (count_a - count_ab) * math.log(1.0 - p) + except ValueError: + summand1 = 0 + + try: + summand2 = (count_b - count_ab) * math.log(p) + ( + N - count_a - count_b + count_ab + ) * math.log(1.0 - p) + except ValueError: + summand2 = 0 + + if count_a == count_ab or p1 <= 0 or p1 >= 1: + summand3 = 0 + else: + summand3 = count_ab * math.log(p1) + (count_a - count_ab) * math.log( + 1.0 - p1 + ) + + if count_b == count_ab or p2 <= 0 or p2 >= 1: + summand4 = 0 + else: + summand4 = (count_b - count_ab) * math.log(p2) + ( + N - count_a - count_b + count_ab + ) * math.log(1.0 - p2) + + likelihood = summand1 + summand2 - summand3 - summand4 + + return -2.0 * likelihood + + # //////////////////////////////////////////////////////////// + # { Collocation Finder + # //////////////////////////////////////////////////////////// + + def _is_potential_collocation(self, aug_tok1, aug_tok2): + """ + Returns True if the pair of tokens may form a collocation given + log-likelihood statistics. + """ + return ( + ( + self.INCLUDE_ALL_COLLOCS + or (self.INCLUDE_ABBREV_COLLOCS and aug_tok1.abbr) + or (aug_tok1.sentbreak and (aug_tok1.is_number or aug_tok1.is_initial)) + ) + and aug_tok1.is_non_punct + and aug_tok2.is_non_punct + ) + + def _find_collocations(self): + """ + Generates likely collocations and their log-likelihood. 
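+
+        The pairs yielded here are stored in ``self._params.collocations`` by
+        ``finalize_training()``; a hedged usage sketch (``trainer`` is a
+        hypothetical, already-trained PunktTrainer):
+
+            >>> trainer.finalize_training()                    # doctest: +SKIP
+            >>> sorted(trainer.get_params().collocations)[:3]  # doctest: +SKIP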
+ """ + for types in self._collocation_fdist: + try: + typ1, typ2 = types + except TypeError: + # types may be None after calling freq_threshold() + continue + if typ2 in self._params.sent_starters: + continue + + col_count = self._collocation_fdist[types] + typ1_count = self._type_fdist[typ1] + self._type_fdist[typ1 + "."] + typ2_count = self._type_fdist[typ2] + self._type_fdist[typ2 + "."] + if ( + typ1_count > 1 + and typ2_count > 1 + and self.MIN_COLLOC_FREQ < col_count <= min(typ1_count, typ2_count) + ): + + log_likelihood = self._col_log_likelihood( + typ1_count, typ2_count, col_count, self._type_fdist.N() + ) + # Filter out the not-so-collocative + if log_likelihood >= self.COLLOCATION and ( + self._type_fdist.N() / typ1_count > typ2_count / col_count + ): + yield (typ1, typ2), log_likelihood + + # //////////////////////////////////////////////////////////// + # { Sentence-Starter Finder + # //////////////////////////////////////////////////////////// + + def _is_potential_sent_starter(self, cur_tok, prev_tok): + """ + Returns True given a token and the token that precedes it if it + seems clear that the token is beginning a sentence. + """ + # If a token (i) is preceded by a sentece break that is + # not a potential ordinal number or initial, and (ii) is + # alphabetic, then it is a a sentence-starter. + return ( + prev_tok.sentbreak + and not (prev_tok.is_number or prev_tok.is_initial) + and cur_tok.is_alpha + ) + + def _find_sent_starters(self): + """ + Uses collocation heuristics for each candidate token to + determine if it frequently starts sentences. + """ + for typ in self._sent_starter_fdist: + if not typ: + continue + + typ_at_break_count = self._sent_starter_fdist[typ] + typ_count = self._type_fdist[typ] + self._type_fdist[typ + "."] + if typ_count < typ_at_break_count: + # needed after freq_threshold + continue + + log_likelihood = self._col_log_likelihood( + self._sentbreak_count, + typ_count, + typ_at_break_count, + self._type_fdist.N(), + ) + + if ( + log_likelihood >= self.SENT_STARTER + and self._type_fdist.N() / self._sentbreak_count + > typ_count / typ_at_break_count + ): + yield typ, log_likelihood + + def _get_sentbreak_count(self, tokens): + """ + Returns the number of sentence breaks marked in a given set of + augmented tokens. + """ + return sum(1 for aug_tok in tokens if aug_tok.sentbreak) + + +###################################################################### +# { Punkt Sentence Tokenizer +###################################################################### + + +class PunktSentenceTokenizer(PunktBaseClass, TokenizerI): + """ + A sentence tokenizer which uses an unsupervised algorithm to build + a model for abbreviation words, collocations, and words that start + sentences; and then uses that model to find sentence boundaries. + This approach has been shown to work well for many European + languages. + """ + + def __init__( + self, train_text=None, verbose=False, lang_vars=None, token_cls=PunktToken + ): + """ + train_text can either be the sole training text for this sentence + boundary detector, or can be a PunktParameters object. + """ + PunktBaseClass.__init__(self, lang_vars=lang_vars, token_cls=token_cls) + + if train_text: + self._params = self.train(train_text, verbose) + + def train(self, train_text, verbose=False): + """ + Derives parameters from a given training text, or uses the parameters + given. Repeated calls to this method destroy previous parameters. For + incremental training, instantiate a separate PunktTrainer instance. 
+ """ + if not isinstance(train_text, str): + return train_text + return PunktTrainer( + train_text, lang_vars=self._lang_vars, token_cls=self._Token + ).get_params() + + # //////////////////////////////////////////////////////////// + # { Tokenization + # //////////////////////////////////////////////////////////// + + def tokenize(self, text: str, realign_boundaries: bool = True) -> List[str]: + """ + Given a text, returns a list of the sentences in that text. + """ + return list(self.sentences_from_text(text, realign_boundaries)) + + def debug_decisions(self, text: str) -> Iterator[Dict[str, Any]]: + """ + Classifies candidate periods as sentence breaks, yielding a dict for + each that may be used to understand why the decision was made. + + See format_debug_decision() to help make this output readable. + """ + + for match, decision_text in self._match_potential_end_contexts(text): + tokens = self._tokenize_words(decision_text) + tokens = list(self._annotate_first_pass(tokens)) + while tokens and not tokens[0].tok.endswith(self._lang_vars.sent_end_chars): + tokens.pop(0) + yield { + "period_index": match.end() - 1, + "text": decision_text, + "type1": tokens[0].type, + "type2": tokens[1].type, + "type1_in_abbrs": bool(tokens[0].abbr), + "type1_is_initial": bool(tokens[0].is_initial), + "type2_is_sent_starter": tokens[1].type_no_sentperiod + in self._params.sent_starters, + "type2_ortho_heuristic": self._ortho_heuristic(tokens[1]), + "type2_ortho_contexts": set( + self._params._debug_ortho_context(tokens[1].type_no_sentperiod) + ), + "collocation": ( + tokens[0].type_no_sentperiod, + tokens[1].type_no_sentperiod, + ) + in self._params.collocations, + "reason": self._second_pass_annotation(tokens[0], tokens[1]) + or REASON_DEFAULT_DECISION, + "break_decision": tokens[0].sentbreak, + } + + def span_tokenize( + self, text: str, realign_boundaries: bool = True + ) -> Iterator[Tuple[int, int]]: + """ + Given a text, generates (start, end) spans of sentences + in the text. + """ + slices = self._slices_from_text(text) + if realign_boundaries: + slices = self._realign_boundaries(text, slices) + for sentence in slices: + yield (sentence.start, sentence.stop) + + def sentences_from_text( + self, text: str, realign_boundaries: bool = True + ) -> List[str]: + """ + Given a text, generates the sentences in that text by only + testing candidate sentence breaks. If realign_boundaries is + True, includes in the sentence closing punctuation that + follows the period. + """ + return [text[s:e] for s, e in self.span_tokenize(text, realign_boundaries)] + + def _get_last_whitespace_index(self, text: str) -> int: + """ + Given a text, find the index of the *last* occurrence of *any* + whitespace character, i.e. " ", "\n", "\t", "\r", etc. + If none is found, return 0. + """ + for i in range(len(text) - 1, -1, -1): + if text[i] in string.whitespace: + return i + return 0 + + def _match_potential_end_contexts(self, text: str) -> Iterator[Tuple[Match, str]]: + """ + Given a text, find the matches of potential sentence breaks, + alongside the contexts surrounding these sentence breaks. + + Since the fix for the ReDOS discovered in issue #2866, we no longer match + the word before a potential end of sentence token. Instead, we use a separate + regex for this. As a consequence, `finditer`'s desire to find non-overlapping + matches no longer aids us in finding the single longest match. + Where previously, we could use:: + + >>> pst = PunktSentenceTokenizer() + >>> text = "Very bad acting!!! I promise." 
+ >>> list(pst._lang_vars.period_context_re().finditer(text)) # doctest: +SKIP + [] + + Now we have to find the word before (i.e. 'acting') separately, and `finditer` + returns:: + + >>> pst = PunktSentenceTokenizer() + >>> text = "Very bad acting!!! I promise." + >>> list(pst._lang_vars.period_context_re().finditer(text)) # doctest: +NORMALIZE_WHITESPACE + [, + , + ] + + So, we need to find the word before the match from right to left, and then manually remove + the overlaps. That is what this method does:: + + >>> pst = PunktSentenceTokenizer() + >>> text = "Very bad acting!!! I promise." + >>> list(pst._match_potential_end_contexts(text)) + [(, 'acting!!! I')] + + :param text: String of one or more sentences + :type text: str + :return: Generator of match-context tuples. + :rtype: Iterator[Tuple[Match, str]] + """ + previous_slice = slice(0, 0) + previous_match = None + for match in self._lang_vars.period_context_re().finditer(text): + + # Get the slice of the previous word + before_text = text[previous_slice.stop : match.start()] + index_after_last_space = self._get_last_whitespace_index(before_text) + if index_after_last_space: + # + 1 to exclude the space itself + index_after_last_space += previous_slice.stop + 1 + else: + index_after_last_space = previous_slice.start + prev_word_slice = slice(index_after_last_space, match.start()) + + # If the previous slice does not overlap with this slice, then + # we can yield the previous match and slice. If there is an overlap, + # then we do not yield the previous match and slice. + if previous_match and previous_slice.stop <= prev_word_slice.start: + yield ( + previous_match, + text[previous_slice] + + previous_match.group() + + previous_match.group("after_tok"), + ) + previous_match = match + previous_slice = prev_word_slice + + # Yield the last match and context, if it exists + if previous_match: + yield ( + previous_match, + text[previous_slice] + + previous_match.group() + + previous_match.group("after_tok"), + ) + + def _slices_from_text(self, text: str) -> Iterator[slice]: + last_break = 0 + for match, context in self._match_potential_end_contexts(text): + if self.text_contains_sentbreak(context): + yield slice(last_break, match.end()) + if match.group("next_tok"): + # next sentence starts after whitespace + last_break = match.start("next_tok") + else: + # next sentence starts at following punctuation + last_break = match.end() + # The last sentence should not contain trailing whitespace. + yield slice(last_break, len(text.rstrip())) + + def _realign_boundaries( + self, text: str, slices: Iterator[slice] + ) -> Iterator[slice]: + """ + Attempts to realign punctuation that falls after the period but + should otherwise be included in the same sentence. + + For example: "(Sent1.) Sent2." will otherwise be split as:: + + ["(Sent1.", ") Sent1."]. + + This method will produce:: + + ["(Sent1.)", "Sent2."]. + """ + realign = 0 + for sentence1, sentence2 in _pair_iter(slices): + sentence1 = slice(sentence1.start + realign, sentence1.stop) + if not sentence2: + if text[sentence1]: + yield sentence1 + continue + + m = self._lang_vars.re_boundary_realignment.match(text[sentence2]) + if m: + yield slice(sentence1.start, sentence2.start + len(m.group(0).rstrip())) + realign = m.end() + else: + realign = 0 + if text[sentence1]: + yield sentence1 + + def text_contains_sentbreak(self, text: str) -> bool: + """ + Returns True if the given text includes a sentence break. 
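+
+        For example (shown for illustration only; the output is not
+        machine-checked here):
+
+            >>> PunktSentenceTokenizer().text_contains_sentbreak("Hello. How are you?")  # doctest: +SKIP
+            True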
+ """ + found = False # used to ignore last token + for tok in self._annotate_tokens(self._tokenize_words(text)): + if found: + return True + if tok.sentbreak: + found = True + return False + + def sentences_from_text_legacy(self, text: str) -> Iterator[str]: + """ + Given a text, generates the sentences in that text. Annotates all + tokens, rather than just those with possible sentence breaks. Should + produce the same results as ``sentences_from_text``. + """ + tokens = self._annotate_tokens(self._tokenize_words(text)) + return self._build_sentence_list(text, tokens) + + def sentences_from_tokens( + self, tokens: Iterator[PunktToken] + ) -> Iterator[PunktToken]: + """ + Given a sequence of tokens, generates lists of tokens, each list + corresponding to a sentence. + """ + tokens = iter(self._annotate_tokens(self._Token(t) for t in tokens)) + sentence = [] + for aug_tok in tokens: + sentence.append(aug_tok.tok) + if aug_tok.sentbreak: + yield sentence + sentence = [] + if sentence: + yield sentence + + def _annotate_tokens(self, tokens: Iterator[PunktToken]) -> Iterator[PunktToken]: + """ + Given a set of tokens augmented with markers for line-start and + paragraph-start, returns an iterator through those tokens with full + annotation including predicted sentence breaks. + """ + # Make a preliminary pass through the document, marking likely + # sentence breaks, abbreviations, and ellipsis tokens. + tokens = self._annotate_first_pass(tokens) + + # Make a second pass through the document, using token context + # information to change our preliminary decisions about where + # sentence breaks, abbreviations, and ellipsis occurs. + tokens = self._annotate_second_pass(tokens) + + ## [XX] TESTING + # tokens = list(tokens) + # self.dump(tokens) + + return tokens + + def _build_sentence_list( + self, text: str, tokens: Iterator[PunktToken] + ) -> Iterator[str]: + """ + Given the original text and the list of augmented word tokens, + construct and return a tokenized list of sentence strings. + """ + # Most of the work here is making sure that we put the right + # pieces of whitespace back in all the right places. + + # Our position in the source text, used to keep track of which + # whitespace to add: + pos = 0 + + # A regular expression that finds pieces of whitespace: + white_space_regexp = re.compile(r"\s*") + + sentence = "" + for aug_tok in tokens: + tok = aug_tok.tok + + # Find the whitespace before this token, and update pos. + white_space = white_space_regexp.match(text, pos).group() + pos += len(white_space) + + # Some of the rules used by the punkt word tokenizer + # strip whitespace out of the text, resulting in tokens + # that contain whitespace in the source text. If our + # token doesn't match, see if adding whitespace helps. + # If so, then use the version with whitespace. + if text[pos : pos + len(tok)] != tok: + pat = r"\s*".join(re.escape(c) for c in tok) + m = re.compile(pat).match(text, pos) + if m: + tok = m.group() + + # Move our position pointer to the end of the token. + assert text[pos : pos + len(tok)] == tok + pos += len(tok) + + # Add this token. If it's not at the beginning of the + # sentence, then include any whitespace that separated it + # from the previous token. + if sentence: + sentence += white_space + sentence += tok + + # If we're at a sentence break, then start a new sentence. + if aug_tok.sentbreak: + yield sentence + sentence = "" + + # If the last sentence is empty, discard it. 
+ if sentence: + yield sentence + + # [XX] TESTING + def dump(self, tokens: Iterator[PunktToken]) -> None: + print("writing to /tmp/punkt.new...") + with open("/tmp/punkt.new", "w") as outfile: + for aug_tok in tokens: + if aug_tok.parastart: + outfile.write("\n\n") + elif aug_tok.linestart: + outfile.write("\n") + else: + outfile.write(" ") + + outfile.write(str(aug_tok)) + + # //////////////////////////////////////////////////////////// + # { Customization Variables + # //////////////////////////////////////////////////////////// + + PUNCTUATION = tuple(";:,.!?") + + # //////////////////////////////////////////////////////////// + # { Annotation Procedures + # //////////////////////////////////////////////////////////// + + def _annotate_second_pass( + self, tokens: Iterator[PunktToken] + ) -> Iterator[PunktToken]: + """ + Performs a token-based classification (section 4) over the given + tokens, making use of the orthographic heuristic (4.1.1), collocation + heuristic (4.1.2) and frequent sentence starter heuristic (4.1.3). + """ + for token1, token2 in _pair_iter(tokens): + self._second_pass_annotation(token1, token2) + yield token1 + + def _second_pass_annotation( + self, aug_tok1: PunktToken, aug_tok2: Optional[PunktToken] + ) -> Optional[str]: + """ + Performs token-based classification over a pair of contiguous tokens + updating the first. + """ + # Is it the last token? We can't do anything then. + if not aug_tok2: + return + + if not aug_tok1.period_final: + # We only care about words ending in periods. + return + typ = aug_tok1.type_no_period + next_typ = aug_tok2.type_no_sentperiod + tok_is_initial = aug_tok1.is_initial + + # [4.1.2. Collocation Heuristic] If there's a + # collocation between the word before and after the + # period, then label tok as an abbreviation and NOT + # a sentence break. Note that collocations with + # frequent sentence starters as their second word are + # excluded in training. + if (typ, next_typ) in self._params.collocations: + aug_tok1.sentbreak = False + aug_tok1.abbr = True + return REASON_KNOWN_COLLOCATION + + # [4.2. Token-Based Reclassification of Abbreviations] If + # the token is an abbreviation or an ellipsis, then decide + # whether we should *also* classify it as a sentbreak. + if (aug_tok1.abbr or aug_tok1.ellipsis) and (not tok_is_initial): + # [4.1.1. Orthographic Heuristic] Check if there's + # orthogrpahic evidence about whether the next word + # starts a sentence or not. + is_sent_starter = self._ortho_heuristic(aug_tok2) + if is_sent_starter == True: + aug_tok1.sentbreak = True + return REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC + + # [4.1.3. Frequent Sentence Starter Heruistic] If the + # next word is capitalized, and is a member of the + # frequent-sentence-starters list, then label tok as a + # sentence break. + if aug_tok2.first_upper and next_typ in self._params.sent_starters: + aug_tok1.sentbreak = True + return REASON_ABBR_WITH_SENTENCE_STARTER + + # [4.3. Token-Based Detection of Initials and Ordinals] + # Check if any initials or ordinals tokens that are marked + # as sentbreaks should be reclassified as abbreviations. + if tok_is_initial or typ == "##number##": + + # [4.1.1. Orthographic Heuristic] Check if there's + # orthogrpahic evidence about whether the next word + # starts a sentence or not. 
+ is_sent_starter = self._ortho_heuristic(aug_tok2) + + if is_sent_starter == False: + aug_tok1.sentbreak = False + aug_tok1.abbr = True + if tok_is_initial: + return REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC + return REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC + + # Special heuristic for initials: if orthogrpahic + # heuristic is unknown, and next word is always + # capitalized, then mark as abbrev (eg: J. Bach). + if ( + is_sent_starter == "unknown" + and tok_is_initial + and aug_tok2.first_upper + and not (self._params.ortho_context[next_typ] & _ORTHO_LC) + ): + aug_tok1.sentbreak = False + aug_tok1.abbr = True + return REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC + + return + + def _ortho_heuristic(self, aug_tok: PunktToken) -> Union[bool, str]: + """ + Decide whether the given token is the first token in a sentence. + """ + # Sentences don't start with punctuation marks: + if aug_tok.tok in self.PUNCTUATION: + return False + + ortho_context = self._params.ortho_context[aug_tok.type_no_sentperiod] + + # If the word is capitalized, occurs at least once with a + # lower case first letter, and never occurs with an upper case + # first letter sentence-internally, then it's a sentence starter. + if ( + aug_tok.first_upper + and (ortho_context & _ORTHO_LC) + and not (ortho_context & _ORTHO_MID_UC) + ): + return True + + # If the word is lower case, and either (a) we've seen it used + # with upper case, or (b) we've never seen it used + # sentence-initially with lower case, then it's not a sentence + # starter. + if aug_tok.first_lower and ( + (ortho_context & _ORTHO_UC) or not (ortho_context & _ORTHO_BEG_LC) + ): + return False + + # Otherwise, we're not sure. + return "unknown" + + +DEBUG_DECISION_FMT = """Text: {text!r} (at offset {period_index}) +Sentence break? {break_decision} ({reason}) +Collocation? {collocation} +{type1!r}: + known abbreviation: {type1_in_abbrs} + is initial: {type1_is_initial} +{type2!r}: + known sentence starter: {type2_is_sent_starter} + orthographic heuristic suggests is a sentence starter? {type2_ortho_heuristic} + orthographic contexts in training: {type2_ortho_contexts} +""" + + +def format_debug_decision(d): + return DEBUG_DECISION_FMT.format(**d) + + +def demo(text, tok_cls=PunktSentenceTokenizer, train_cls=PunktTrainer): + """Builds a punkt model and applies it to the same text""" + cleanup = ( + lambda s: re.compile(r"(?:\r|^\s+)", re.MULTILINE).sub("", s).replace("\n", " ") + ) + trainer = train_cls() + trainer.INCLUDE_ALL_COLLOCS = True + trainer.train(text) + sbd = tok_cls(trainer.get_params()) + for sentence in sbd.sentences_from_text(text): + print(cleanup(sentence)) diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/regexp.py b/venv/lib/python3.10/site-packages/nltk/tokenize/regexp.py new file mode 100644 index 0000000000000000000000000000000000000000..e3875b1447ba2843b7e6f186de24b4e67baf8844 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/regexp.py @@ -0,0 +1,220 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +r""" +Regular-Expression Tokenizers + +A ``RegexpTokenizer`` splits a string into substrings using a regular expression. +For example, the following tokenizer forms tokens out of alphabetic sequences, +money expressions, and any other non-whitespace sequences: + + >>> from nltk.tokenize import RegexpTokenizer + >>> s = "Good muffins cost $3.88\nin New York. 
Please buy me\ntwo of them.\n\nThanks." + >>> tokenizer = RegexpTokenizer(r'\w+|\$[\d\.]+|\S+') + >>> tokenizer.tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.', + 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + +A ``RegexpTokenizer`` can use its regexp to match delimiters instead: + + >>> tokenizer = RegexpTokenizer(r'\s+', gaps=True) + >>> tokenizer.tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.', + 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.'] + +Note that empty tokens are not returned when the delimiter appears at +the start or end of the string. + +The material between the tokens is discarded. For example, +the following tokenizer selects just the capitalized words: + + >>> capword_tokenizer = RegexpTokenizer(r'[A-Z]\w+') + >>> capword_tokenizer.tokenize(s) + ['Good', 'New', 'York', 'Please', 'Thanks'] + +This module contains several subclasses of ``RegexpTokenizer`` +that use pre-defined regular expressions. + + >>> from nltk.tokenize import BlanklineTokenizer + >>> # Uses '\s*\n\s*\n\s*': + >>> BlanklineTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.', + 'Thanks.'] + +All of the regular expression tokenizers are also available as functions: + + >>> from nltk.tokenize import regexp_tokenize, wordpunct_tokenize, blankline_tokenize + >>> regexp_tokenize(s, pattern=r'\w+|\$[\d\.]+|\S+') # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.', + 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + >>> wordpunct_tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', + '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + >>> blankline_tokenize(s) + ['Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.', 'Thanks.'] + +Caution: The function ``regexp_tokenize()`` takes the text as its +first argument, and the regular expression pattern as its second +argument. This differs from the conventions used by Python's +``re`` functions, where the pattern is always the first argument. +(This is for consistency with the other NLTK tokenizers.) +""" + +import re + +from nltk.tokenize.api import TokenizerI +from nltk.tokenize.util import regexp_span_tokenize + + +class RegexpTokenizer(TokenizerI): + r""" + A tokenizer that splits a string using a regular expression, which + matches either the tokens or the separators between tokens. + + >>> tokenizer = RegexpTokenizer(r'\w+|\$[\d\.]+|\S+') + + :type pattern: str + :param pattern: The pattern used to build this tokenizer. + (This pattern must not contain capturing parentheses; + Use non-capturing parentheses, e.g. (?:...), instead) + :type gaps: bool + :param gaps: True if this tokenizer's pattern should be used + to find separators between tokens; False if this + tokenizer's pattern should be used to find the tokens + themselves. + :type discard_empty: bool + :param discard_empty: True if any empty tokens `''` + generated by the tokenizer should be discarded. Empty + tokens can only be generated if `_gaps == True`. + :type flags: int + :param flags: The regexp flags used to compile this + tokenizer's pattern. By default, the following flags are + used: `re.UNICODE | re.MULTILINE | re.DOTALL`. 
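+
+    ``span_tokenize()`` yields (start, end) offsets of the same tokens; a small
+    illustrative call (the offsets were computed by hand, so the doctest is
+    skipped):
+
+        >>> list(RegexpTokenizer(r'\w+').span_tokenize("Good muffins"))  # doctest: +SKIP
+        [(0, 4), (5, 12)]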
+ + """ + + def __init__( + self, + pattern, + gaps=False, + discard_empty=True, + flags=re.UNICODE | re.MULTILINE | re.DOTALL, + ): + # If they gave us a regexp object, extract the pattern. + pattern = getattr(pattern, "pattern", pattern) + + self._pattern = pattern + self._gaps = gaps + self._discard_empty = discard_empty + self._flags = flags + self._regexp = None + + def _check_regexp(self): + if self._regexp is None: + self._regexp = re.compile(self._pattern, self._flags) + + def tokenize(self, text): + self._check_regexp() + # If our regexp matches gaps, use re.split: + if self._gaps: + if self._discard_empty: + return [tok for tok in self._regexp.split(text) if tok] + else: + return self._regexp.split(text) + + # If our regexp matches tokens, use re.findall: + else: + return self._regexp.findall(text) + + def span_tokenize(self, text): + self._check_regexp() + + if self._gaps: + for left, right in regexp_span_tokenize(text, self._regexp): + if not (self._discard_empty and left == right): + yield left, right + else: + for m in re.finditer(self._regexp, text): + yield m.span() + + def __repr__(self): + return "{}(pattern={!r}, gaps={!r}, discard_empty={!r}, flags={!r})".format( + self.__class__.__name__, + self._pattern, + self._gaps, + self._discard_empty, + self._flags, + ) + + +class WhitespaceTokenizer(RegexpTokenizer): + r""" + Tokenize a string on whitespace (space, tab, newline). + In general, users should use the string ``split()`` method instead. + + >>> from nltk.tokenize import WhitespaceTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> WhitespaceTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.', + 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.'] + """ + + def __init__(self): + RegexpTokenizer.__init__(self, r"\s+", gaps=True) + + +class BlanklineTokenizer(RegexpTokenizer): + """ + Tokenize a string, treating any sequence of blank lines as a delimiter. + Blank lines are defined as lines containing no characters, except for + space or tab characters. + """ + + def __init__(self): + RegexpTokenizer.__init__(self, r"\s*\n\s*\n\s*", gaps=True) + + +class WordPunctTokenizer(RegexpTokenizer): + r""" + Tokenize a text into a sequence of alphabetic and + non-alphabetic characters, using the regexp ``\w+|[^\w\s]+``. + + >>> from nltk.tokenize import WordPunctTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> WordPunctTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', + '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + """ + + def __init__(self): + RegexpTokenizer.__init__(self, r"\w+|[^\w\s]+") + + +###################################################################### +# { Tokenization Functions +###################################################################### + + +def regexp_tokenize( + text, + pattern, + gaps=False, + discard_empty=True, + flags=re.UNICODE | re.MULTILINE | re.DOTALL, +): + """ + Return a tokenized copy of *text*. See :class:`.RegexpTokenizer` + for descriptions of the arguments. 
+ """ + tokenizer = RegexpTokenizer(pattern, gaps, discard_empty, flags) + return tokenizer.tokenize(text) + + +blankline_tokenize = BlanklineTokenizer().tokenize +wordpunct_tokenize = WordPunctTokenizer().tokenize diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/sexpr.py b/venv/lib/python3.10/site-packages/nltk/tokenize/sexpr.py new file mode 100644 index 0000000000000000000000000000000000000000..0776642fbd2759c3f37352a97b18d915198cc20c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/sexpr.py @@ -0,0 +1,140 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Yoav Goldberg +# Steven Bird (minor edits) +# URL: +# For license information, see LICENSE.TXT + +""" +S-Expression Tokenizer + +``SExprTokenizer`` is used to find parenthesized expressions in a +string. In particular, it divides a string into a sequence of +substrings that are either parenthesized expressions (including any +nested parenthesized expressions), or other whitespace-separated +tokens. + + >>> from nltk.tokenize import SExprTokenizer + >>> SExprTokenizer().tokenize('(a b (c d)) e f (g)') + ['(a b (c d))', 'e', 'f', '(g)'] + +By default, `SExprTokenizer` will raise a ``ValueError`` exception if +used to tokenize an expression with non-matching parentheses: + + >>> SExprTokenizer().tokenize('c) d) e (f (g') + Traceback (most recent call last): + ... + ValueError: Un-matched close paren at char 1 + +The ``strict`` argument can be set to False to allow for +non-matching parentheses. Any unmatched close parentheses will be +listed as their own s-expression; and the last partial sexpr with +unmatched open parentheses will be listed as its own sexpr: + + >>> SExprTokenizer(strict=False).tokenize('c) d) e (f (g') + ['c', ')', 'd', ')', 'e', '(f (g'] + +The characters used for open and close parentheses may be customized +using the ``parens`` argument to the `SExprTokenizer` constructor: + + >>> SExprTokenizer(parens='{}').tokenize('{a b {c d}} e f {g}') + ['{a b {c d}}', 'e', 'f', '{g}'] + +The s-expression tokenizer is also available as a function: + + >>> from nltk.tokenize import sexpr_tokenize + >>> sexpr_tokenize('(a b (c d)) e f (g)') + ['(a b (c d))', 'e', 'f', '(g)'] + +""" + +import re + +from nltk.tokenize.api import TokenizerI + + +class SExprTokenizer(TokenizerI): + """ + A tokenizer that divides strings into s-expressions. + An s-expresion can be either: + + - a parenthesized expression, including any nested parenthesized + expressions, or + - a sequence of non-whitespace non-parenthesis characters. + + For example, the string ``(a (b c)) d e (f)`` consists of four + s-expressions: ``(a (b c))``, ``d``, ``e``, and ``(f)``. + + By default, the characters ``(`` and ``)`` are treated as open and + close parentheses, but alternative strings may be specified. + + :param parens: A two-element sequence specifying the open and close parentheses + that should be used to find sexprs. This will typically be either a + two-character string, or a list of two strings. + :type parens: str or list + :param strict: If true, then raise an exception when tokenizing an ill-formed sexpr. 
+ """ + + def __init__(self, parens="()", strict=True): + if len(parens) != 2: + raise ValueError("parens must contain exactly two strings") + self._strict = strict + self._open_paren = parens[0] + self._close_paren = parens[1] + self._paren_regexp = re.compile( + f"{re.escape(parens[0])}|{re.escape(parens[1])}" + ) + + def tokenize(self, text): + """ + Return a list of s-expressions extracted from *text*. + For example: + + >>> SExprTokenizer().tokenize('(a b (c d)) e f (g)') + ['(a b (c d))', 'e', 'f', '(g)'] + + All parentheses are assumed to mark s-expressions. + (No special processing is done to exclude parentheses that occur + inside strings, or following backslash characters.) + + If the given expression contains non-matching parentheses, + then the behavior of the tokenizer depends on the ``strict`` + parameter to the constructor. If ``strict`` is ``True``, then + raise a ``ValueError``. If ``strict`` is ``False``, then any + unmatched close parentheses will be listed as their own + s-expression; and the last partial s-expression with unmatched open + parentheses will be listed as its own s-expression: + + >>> SExprTokenizer(strict=False).tokenize('c) d) e (f (g') + ['c', ')', 'd', ')', 'e', '(f (g'] + + :param text: the string to be tokenized + :type text: str or iter(str) + :rtype: iter(str) + """ + result = [] + pos = 0 + depth = 0 + for m in self._paren_regexp.finditer(text): + paren = m.group() + if depth == 0: + result += text[pos : m.start()].split() + pos = m.start() + if paren == self._open_paren: + depth += 1 + if paren == self._close_paren: + if self._strict and depth == 0: + raise ValueError("Un-matched close paren at char %d" % m.start()) + depth = max(0, depth - 1) + if depth == 0: + result.append(text[pos : m.end()]) + pos = m.end() + if self._strict and depth > 0: + raise ValueError("Un-matched open paren at char %d" % pos) + if pos < len(text): + result.append(text[pos:]) + return result + + +sexpr_tokenize = SExprTokenizer().tokenize diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/simple.py b/venv/lib/python3.10/site-packages/nltk/tokenize/simple.py new file mode 100644 index 0000000000000000000000000000000000000000..f87b60a274c8121303ff60f203e1f3b991da1547 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/simple.py @@ -0,0 +1,137 @@ +# Natural Language Toolkit: Simple Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +r""" +Simple Tokenizers + +These tokenizers divide strings into substrings using the string +``split()`` method. +When tokenizing using a particular delimiter string, use +the string ``split()`` method directly, as this is more efficient. + +The simple tokenizers are *not* available as separate functions; +instead, you should just use the string ``split()`` method directly: + + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> s.split() # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.', + 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.'] + >>> s.split(' ') # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88\nin', 'New', 'York.', '', + 'Please', 'buy', 'me\ntwo', 'of', 'them.\n\nThanks.'] + >>> s.split('\n') # doctest: +NORMALIZE_WHITESPACE + ['Good muffins cost $3.88', 'in New York. 
Please buy me', + 'two of them.', '', 'Thanks.'] + +The simple tokenizers are mainly useful because they follow the +standard ``TokenizerI`` interface, and so can be used with any code +that expects a tokenizer. For example, these tokenizers can be used +to specify the tokenization conventions when building a `CorpusReader`. + +""" + +from nltk.tokenize.api import StringTokenizer, TokenizerI +from nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize + + +class SpaceTokenizer(StringTokenizer): + r"""Tokenize a string using the space character as a delimiter, + which is the same as ``s.split(' ')``. + + >>> from nltk.tokenize import SpaceTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> SpaceTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88\nin', 'New', 'York.', '', + 'Please', 'buy', 'me\ntwo', 'of', 'them.\n\nThanks.'] + """ + + _string = " " + + +class TabTokenizer(StringTokenizer): + r"""Tokenize a string use the tab character as a delimiter, + the same as ``s.split('\t')``. + + >>> from nltk.tokenize import TabTokenizer + >>> TabTokenizer().tokenize('a\tb c\n\t d') + ['a', 'b c\n', ' d'] + """ + + _string = "\t" + + +class CharTokenizer(StringTokenizer): + """Tokenize a string into individual characters. If this functionality + is ever required directly, use ``for char in string``. + """ + + def tokenize(self, s): + return list(s) + + def span_tokenize(self, s): + yield from enumerate(range(1, len(s) + 1)) + + +class LineTokenizer(TokenizerI): + r"""Tokenize a string into its lines, optionally discarding blank lines. + This is similar to ``s.split('\n')``. + + >>> from nltk.tokenize import LineTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> LineTokenizer(blanklines='keep').tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good muffins cost $3.88', 'in New York. Please buy me', + 'two of them.', '', 'Thanks.'] + >>> # same as [l for l in s.split('\n') if l.strip()]: + >>> LineTokenizer(blanklines='discard').tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good muffins cost $3.88', 'in New York. Please buy me', + 'two of them.', 'Thanks.'] + + :param blanklines: Indicates how blank lines should be handled. Valid values are: + + - ``discard``: strip blank lines out of the token list before returning it. + A line is considered blank if it contains only whitespace characters. + - ``keep``: leave all blank lines in the token list. + - ``discard-eof``: if the string ends with a newline, then do not generate + a corresponding token ``''`` after that newline. + """ + + def __init__(self, blanklines="discard"): + valid_blanklines = ("discard", "keep", "discard-eof") + if blanklines not in valid_blanklines: + raise ValueError( + "Blank lines must be one of: %s" % " ".join(valid_blanklines) + ) + + self._blanklines = blanklines + + def tokenize(self, s): + lines = s.splitlines() + # If requested, strip off blank lines. 
+ if self._blanklines == "discard": + lines = [l for l in lines if l.rstrip()] + elif self._blanklines == "discard-eof": + if lines and not lines[-1].strip(): + lines.pop() + return lines + + # discard-eof not implemented + def span_tokenize(self, s): + if self._blanklines == "keep": + yield from string_span_tokenize(s, r"\n") + else: + yield from regexp_span_tokenize(s, r"\n(\s+\n)*") + + +###################################################################### +# { Tokenization Functions +###################################################################### +# XXX: it is stated in module docs that there is no function versions + + +def line_tokenize(text, blanklines="discard"): + return LineTokenizer(blanklines).tokenize(text) diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/sonority_sequencing.py b/venv/lib/python3.10/site-packages/nltk/tokenize/sonority_sequencing.py new file mode 100644 index 0000000000000000000000000000000000000000..24e43caae2dae6e3c76e66704fa9b856a6dc348c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/sonority_sequencing.py @@ -0,0 +1,194 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Christopher Hench +# Alex Estes +# URL: +# For license information, see LICENSE.TXT + +""" +The Sonority Sequencing Principle (SSP) is a language agnostic algorithm proposed +by Otto Jesperson in 1904. The sonorous quality of a phoneme is judged by the +openness of the lips. Syllable breaks occur before troughs in sonority. For more +on the SSP see Selkirk (1984). + +The default implementation uses the English alphabet, but the `sonority_hiearchy` +can be modified to IPA or any other alphabet for the use-case. The SSP is a +universal syllabification algorithm, but that does not mean it performs equally +across languages. Bartlett et al. (2009) is a good benchmark for English accuracy +if utilizing IPA (pg. 311). + +Importantly, if a custom hierarchy is supplied and vowels span across more than +one level, they should be given separately to the `vowels` class attribute. + +References: + +- Otto Jespersen. 1904. Lehrbuch der Phonetik. + Leipzig, Teubner. Chapter 13, Silbe, pp. 185-203. +- Elisabeth Selkirk. 1984. On the major class features and syllable theory. + In Aronoff & Oehrle (eds.) Language Sound Structure: Studies in Phonology. + Cambridge, MIT Press. pp. 107-136. +- Susan Bartlett, et al. 2009. On the Syllabification of Phonemes. + In HLT-NAACL. pp. 308-316. +""" + +import re +import warnings +from string import punctuation + +from nltk.tokenize.api import TokenizerI +from nltk.util import ngrams + + +class SyllableTokenizer(TokenizerI): + """ + Syllabifies words based on the Sonority Sequencing Principle (SSP). + + >>> from nltk.tokenize import SyllableTokenizer + >>> from nltk import word_tokenize + >>> SSP = SyllableTokenizer() + >>> SSP.tokenize('justification') + ['jus', 'ti', 'fi', 'ca', 'tion'] + >>> text = "This is a foobar-like sentence." + >>> [SSP.tokenize(token) for token in word_tokenize(text)] + [['This'], ['is'], ['a'], ['foo', 'bar', '-', 'li', 'ke'], ['sen', 'ten', 'ce'], ['.']] + """ + + def __init__(self, lang="en", sonority_hierarchy=False): + """ + :param lang: Language parameter, default is English, 'en' + :type lang: str + :param sonority_hierarchy: Sonority hierarchy according to the + Sonority Sequencing Principle. + :type sonority_hierarchy: list(str) + """ + # Sonority hierarchy should be provided in descending order. 
+ # If vowels are spread across multiple levels, they should be + # passed assigned self.vowels var together, otherwise should be + # placed in first index of hierarchy. + if not sonority_hierarchy and lang == "en": + sonority_hierarchy = [ + "aeiouy", # vowels. + "lmnrw", # nasals. + "zvsf", # fricatives. + "bcdgtkpqxhj", # stops. + ] + + self.vowels = sonority_hierarchy[0] + self.phoneme_map = {} + for i, level in enumerate(sonority_hierarchy): + for c in level: + sonority_level = len(sonority_hierarchy) - i + self.phoneme_map[c] = sonority_level + self.phoneme_map[c.upper()] = sonority_level + + def assign_values(self, token): + """ + Assigns each phoneme its value from the sonority hierarchy. + Note: Sentence/text has to be tokenized first. + + :param token: Single word or token + :type token: str + :return: List of tuples, first element is character/phoneme and + second is the soronity value. + :rtype: list(tuple(str, int)) + """ + syllables_values = [] + for c in token: + try: + syllables_values.append((c, self.phoneme_map[c])) + except KeyError: + if c not in "0123456789" and c not in punctuation: + warnings.warn( + "Character not defined in sonority_hierarchy," + " assigning as vowel: '{}'".format(c) + ) + syllables_values.append((c, max(self.phoneme_map.values()))) + if c not in self.vowels: + self.vowels += c + else: # If it's a punctuation or numbers, assign -1. + syllables_values.append((c, -1)) + return syllables_values + + def validate_syllables(self, syllable_list): + """ + Ensures each syllable has at least one vowel. + If the following syllable doesn't have vowel, add it to the current one. + + :param syllable_list: Single word or token broken up into syllables. + :type syllable_list: list(str) + :return: Single word or token broken up into syllables + (with added syllables if necessary) + :rtype: list(str) + """ + valid_syllables = [] + front = "" + vowel_pattern = re.compile("|".join(self.vowels)) + for i, syllable in enumerate(syllable_list): + if syllable in punctuation: + valid_syllables.append(syllable) + continue + if not vowel_pattern.search(syllable): + if len(valid_syllables) == 0: + front += syllable + else: + valid_syllables = valid_syllables[:-1] + [ + valid_syllables[-1] + syllable + ] + else: + if len(valid_syllables) == 0: + valid_syllables.append(front + syllable) + else: + valid_syllables.append(syllable) + + return valid_syllables + + def tokenize(self, token): + """ + Apply the SSP to return a list of syllables. + Note: Sentence/text has to be tokenized first. + + :param token: Single word or token + :type token: str + :return syllable_list: Single word or token broken up into syllables. + :rtype: list(str) + """ + # assign values from hierarchy + syllables_values = self.assign_values(token) + + # if only one vowel return word + if sum(token.count(x) for x in self.vowels) <= 1: + return [token] + + syllable_list = [] + syllable = syllables_values[0][0] # start syllable with first phoneme + for trigram in ngrams(syllables_values, n=3): + phonemes, values = zip(*trigram) + # Sonority of previous, focal and following phoneme + prev_value, focal_value, next_value = values + # Focal phoneme. + focal_phoneme = phonemes[1] + + # These cases trigger syllable break. + if focal_value == -1: # If it's a punctuation, just break. 
+ syllable_list.append(syllable) + syllable_list.append(focal_phoneme) + syllable = "" + elif prev_value >= focal_value == next_value: + syllable += focal_phoneme + syllable_list.append(syllable) + syllable = "" + + elif prev_value > focal_value < next_value: + syllable_list.append(syllable) + syllable = "" + syllable += focal_phoneme + + # no syllable break + else: + syllable += focal_phoneme + + syllable += syllables_values[-1][0] # append last phoneme + syllable_list.append(syllable) + + return self.validate_syllables(syllable_list) diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/texttiling.py b/venv/lib/python3.10/site-packages/nltk/tokenize/texttiling.py new file mode 100644 index 0000000000000000000000000000000000000000..b5b770b2d08a998538d85803126e74cc13139d11 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/texttiling.py @@ -0,0 +1,475 @@ +# Natural Language Toolkit: TextTiling +# +# Copyright (C) 2001-2023 NLTK Project +# Author: George Boutsioukis +# +# URL: +# For license information, see LICENSE.TXT + +import math +import re + +try: + import numpy +except ImportError: + pass + +from nltk.tokenize.api import TokenizerI + +BLOCK_COMPARISON, VOCABULARY_INTRODUCTION = 0, 1 +LC, HC = 0, 1 +DEFAULT_SMOOTHING = [0] + + +class TextTilingTokenizer(TokenizerI): + """Tokenize a document into topical sections using the TextTiling algorithm. + This algorithm detects subtopic shifts based on the analysis of lexical + co-occurrence patterns. + + The process starts by tokenizing the text into pseudosentences of + a fixed size w. Then, depending on the method used, similarity + scores are assigned at sentence gaps. The algorithm proceeds by + detecting the peak differences between these scores and marking + them as boundaries. The boundaries are normalized to the closest + paragraph break and the segmented text is returned. + + :param w: Pseudosentence size + :type w: int + :param k: Size (in sentences) of the block used in the block comparison method + :type k: int + :param similarity_method: The method used for determining similarity scores: + `BLOCK_COMPARISON` (default) or `VOCABULARY_INTRODUCTION`. 
+ :type similarity_method: constant + :param stopwords: A list of stopwords that are filtered out (defaults to NLTK's stopwords corpus) + :type stopwords: list(str) + :param smoothing_method: The method used for smoothing the score plot: + `DEFAULT_SMOOTHING` (default) + :type smoothing_method: constant + :param smoothing_width: The width of the window used by the smoothing method + :type smoothing_width: int + :param smoothing_rounds: The number of smoothing passes + :type smoothing_rounds: int + :param cutoff_policy: The policy used to determine the number of boundaries: + `HC` (default) or `LC` + :type cutoff_policy: constant + + >>> from nltk.corpus import brown + >>> tt = TextTilingTokenizer(demo_mode=True) + >>> text = brown.raw()[:4000] + >>> s, ss, d, b = tt.tokenize(text) + >>> b + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0] + """ + + def __init__( + self, + w=20, + k=10, + similarity_method=BLOCK_COMPARISON, + stopwords=None, + smoothing_method=DEFAULT_SMOOTHING, + smoothing_width=2, + smoothing_rounds=1, + cutoff_policy=HC, + demo_mode=False, + ): + + if stopwords is None: + from nltk.corpus import stopwords + + stopwords = stopwords.words("english") + self.__dict__.update(locals()) + del self.__dict__["self"] + + def tokenize(self, text): + """Return a tokenized copy of *text*, where each "token" represents + a separate topic.""" + + lowercase_text = text.lower() + paragraph_breaks = self._mark_paragraph_breaks(text) + text_length = len(lowercase_text) + + # Tokenization step starts here + + # Remove punctuation + nopunct_text = "".join( + c for c in lowercase_text if re.match(r"[a-z\-' \n\t]", c) + ) + nopunct_par_breaks = self._mark_paragraph_breaks(nopunct_text) + + tokseqs = self._divide_to_tokensequences(nopunct_text) + + # The morphological stemming step mentioned in the TextTile + # paper is not implemented. A comment in the original C + # implementation states that it offers no benefit to the + # process. It might be interesting to test the existing + # stemmers though. 
+ # words = _stem_words(words) + + # Filter stopwords + for ts in tokseqs: + ts.wrdindex_list = [ + wi for wi in ts.wrdindex_list if wi[0] not in self.stopwords + ] + + token_table = self._create_token_table(tokseqs, nopunct_par_breaks) + # End of the Tokenization step + + # Lexical score determination + if self.similarity_method == BLOCK_COMPARISON: + gap_scores = self._block_comparison(tokseqs, token_table) + elif self.similarity_method == VOCABULARY_INTRODUCTION: + raise NotImplementedError("Vocabulary introduction not implemented") + else: + raise ValueError( + f"Similarity method {self.similarity_method} not recognized" + ) + + if self.smoothing_method == DEFAULT_SMOOTHING: + smooth_scores = self._smooth_scores(gap_scores) + else: + raise ValueError(f"Smoothing method {self.smoothing_method} not recognized") + # End of Lexical score Determination + + # Boundary identification + depth_scores = self._depth_scores(smooth_scores) + segment_boundaries = self._identify_boundaries(depth_scores) + + normalized_boundaries = self._normalize_boundaries( + text, segment_boundaries, paragraph_breaks + ) + # End of Boundary Identification + segmented_text = [] + prevb = 0 + + for b in normalized_boundaries: + if b == 0: + continue + segmented_text.append(text[prevb:b]) + prevb = b + + if prevb < text_length: # append any text that may be remaining + segmented_text.append(text[prevb:]) + + if not segmented_text: + segmented_text = [text] + + if self.demo_mode: + return gap_scores, smooth_scores, depth_scores, segment_boundaries + return segmented_text + + def _block_comparison(self, tokseqs, token_table): + """Implements the block comparison method""" + + def blk_frq(tok, block): + ts_occs = filter(lambda o: o[0] in block, token_table[tok].ts_occurences) + freq = sum(tsocc[1] for tsocc in ts_occs) + return freq + + gap_scores = [] + numgaps = len(tokseqs) - 1 + + for curr_gap in range(numgaps): + score_dividend, score_divisor_b1, score_divisor_b2 = 0.0, 0.0, 0.0 + score = 0.0 + # adjust window size for boundary conditions + if curr_gap < self.k - 1: + window_size = curr_gap + 1 + elif curr_gap > numgaps - self.k: + window_size = numgaps - curr_gap + else: + window_size = self.k + + b1 = [ts.index for ts in tokseqs[curr_gap - window_size + 1 : curr_gap + 1]] + b2 = [ts.index for ts in tokseqs[curr_gap + 1 : curr_gap + window_size + 1]] + + for t in token_table: + score_dividend += blk_frq(t, b1) * blk_frq(t, b2) + score_divisor_b1 += blk_frq(t, b1) ** 2 + score_divisor_b2 += blk_frq(t, b2) ** 2 + try: + score = score_dividend / math.sqrt(score_divisor_b1 * score_divisor_b2) + except ZeroDivisionError: + pass # score += 0.0 + + gap_scores.append(score) + + return gap_scores + + def _smooth_scores(self, gap_scores): + "Wraps the smooth function from the SciPy Cookbook" + return list( + smooth(numpy.array(gap_scores[:]), window_len=self.smoothing_width + 1) + ) + + def _mark_paragraph_breaks(self, text): + """Identifies indented text or line breaks as the beginning of + paragraphs""" + MIN_PARAGRAPH = 100 + pattern = re.compile("[ \t\r\f\v]*\n[ \t\r\f\v]*\n[ \t\r\f\v]*") + matches = pattern.finditer(text) + + last_break = 0 + pbreaks = [0] + for pb in matches: + if pb.start() - last_break < MIN_PARAGRAPH: + continue + else: + pbreaks.append(pb.start()) + last_break = pb.start() + + return pbreaks + + def _divide_to_tokensequences(self, text): + "Divides the text into pseudosentences of fixed size" + w = self.w + wrdindex_list = [] + matches = re.finditer(r"\w+", text) + for match in matches: + 
wrdindex_list.append((match.group(), match.start())) + return [ + TokenSequence(i / w, wrdindex_list[i : i + w]) + for i in range(0, len(wrdindex_list), w) + ] + + def _create_token_table(self, token_sequences, par_breaks): + "Creates a table of TokenTableFields" + token_table = {} + current_par = 0 + current_tok_seq = 0 + pb_iter = par_breaks.__iter__() + current_par_break = next(pb_iter) + if current_par_break == 0: + try: + current_par_break = next(pb_iter) # skip break at 0 + except StopIteration as e: + raise ValueError( + "No paragraph breaks were found(text too short perhaps?)" + ) from e + for ts in token_sequences: + for word, index in ts.wrdindex_list: + try: + while index > current_par_break: + current_par_break = next(pb_iter) + current_par += 1 + except StopIteration: + # hit bottom + pass + + if word in token_table: + token_table[word].total_count += 1 + + if token_table[word].last_par != current_par: + token_table[word].last_par = current_par + token_table[word].par_count += 1 + + if token_table[word].last_tok_seq != current_tok_seq: + token_table[word].last_tok_seq = current_tok_seq + token_table[word].ts_occurences.append([current_tok_seq, 1]) + else: + token_table[word].ts_occurences[-1][1] += 1 + else: # new word + token_table[word] = TokenTableField( + first_pos=index, + ts_occurences=[[current_tok_seq, 1]], + total_count=1, + par_count=1, + last_par=current_par, + last_tok_seq=current_tok_seq, + ) + + current_tok_seq += 1 + + return token_table + + def _identify_boundaries(self, depth_scores): + """Identifies boundaries at the peaks of similarity score + differences""" + + boundaries = [0 for x in depth_scores] + + avg = sum(depth_scores) / len(depth_scores) + stdev = numpy.std(depth_scores) + + if self.cutoff_policy == LC: + cutoff = avg - stdev + else: + cutoff = avg - stdev / 2.0 + + depth_tuples = sorted(zip(depth_scores, range(len(depth_scores)))) + depth_tuples.reverse() + hp = list(filter(lambda x: x[0] > cutoff, depth_tuples)) + + for dt in hp: + boundaries[dt[1]] = 1 + for dt2 in hp: # undo if there is a boundary close already + if ( + dt[1] != dt2[1] + and abs(dt2[1] - dt[1]) < 4 + and boundaries[dt2[1]] == 1 + ): + boundaries[dt[1]] = 0 + return boundaries + + def _depth_scores(self, scores): + """Calculates the depth of each gap, i.e. the average difference + between the left and right peaks and the gap's score""" + + depth_scores = [0 for x in scores] + # clip boundaries: this holds on the rule of thumb(my thumb) + # that a section shouldn't be smaller than at least 2 + # pseudosentences for small texts and around 5 for larger ones. 
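+    # A worked instance of the clipping rule implemented just below: with 30
+    # gap scores, clip = min(max(30 // 10, 2), 5) = 3, so the first and last
+    # three gaps keep a depth score of 0 and are never proposed as boundaries.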
+ + clip = min(max(len(scores) // 10, 2), 5) + index = clip + + for gapscore in scores[clip:-clip]: + lpeak = gapscore + for score in scores[index::-1]: + if score >= lpeak: + lpeak = score + else: + break + rpeak = gapscore + for score in scores[index:]: + if score >= rpeak: + rpeak = score + else: + break + depth_scores[index] = lpeak + rpeak - 2 * gapscore + index += 1 + + return depth_scores + + def _normalize_boundaries(self, text, boundaries, paragraph_breaks): + """Normalize the boundaries identified to the original text's + paragraph breaks""" + + norm_boundaries = [] + char_count, word_count, gaps_seen = 0, 0, 0 + seen_word = False + + for char in text: + char_count += 1 + if char in " \t\n" and seen_word: + seen_word = False + word_count += 1 + if char not in " \t\n" and not seen_word: + seen_word = True + if gaps_seen < len(boundaries) and word_count > ( + max(gaps_seen * self.w, self.w) + ): + if boundaries[gaps_seen] == 1: + # find closest paragraph break + best_fit = len(text) + for br in paragraph_breaks: + if best_fit > abs(br - char_count): + best_fit = abs(br - char_count) + bestbr = br + else: + break + if bestbr not in norm_boundaries: # avoid duplicates + norm_boundaries.append(bestbr) + gaps_seen += 1 + + return norm_boundaries + + +class TokenTableField: + """A field in the token table holding parameters for each token, + used later in the process""" + + def __init__( + self, + first_pos, + ts_occurences, + total_count=1, + par_count=1, + last_par=0, + last_tok_seq=None, + ): + self.__dict__.update(locals()) + del self.__dict__["self"] + + +class TokenSequence: + "A token list with its original length and its index" + + def __init__(self, index, wrdindex_list, original_length=None): + original_length = original_length or len(wrdindex_list) + self.__dict__.update(locals()) + del self.__dict__["self"] + + +# Pasted from the SciPy cookbook: https://www.scipy.org/Cookbook/SignalSmooth +def smooth(x, window_len=11, window="flat"): + """smooth the data using a window with requested size. + + This method is based on the convolution of a scaled window with the signal. + The signal is prepared by introducing reflected copies of the signal + (with the window size) in both ends so that transient parts are minimized + in the beginning and end part of the output signal. + + :param x: the input signal + :param window_len: the dimension of the smoothing window; should be an odd integer + :param window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman' + flat window will produce a moving average smoothing. + + :return: the smoothed signal + + example:: + + t=linspace(-2,2,0.1) + x=sin(t)+randn(len(t))*0.1 + y=smooth(x) + + :see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve, + scipy.signal.lfilter + + TODO: the window parameter could be the window itself if an array instead of a string + """ + + if x.ndim != 1: + raise ValueError("smooth only accepts 1 dimension arrays.") + + if x.size < window_len: + raise ValueError("Input vector needs to be bigger than window size.") + + if window_len < 3: + return x + + if window not in ["flat", "hanning", "hamming", "bartlett", "blackman"]: + raise ValueError( + "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'" + ) + + s = numpy.r_[2 * x[0] - x[window_len:1:-1], x, 2 * x[-1] - x[-1:-window_len:-1]] + + # print(len(s)) + if window == "flat": # moving average + w = numpy.ones(window_len, "d") + else: + w = eval("numpy." 
+ window + "(window_len)") + + y = numpy.convolve(w / w.sum(), s, mode="same") + + return y[window_len - 1 : -window_len + 1] + + +def demo(text=None): + from matplotlib import pylab + + from nltk.corpus import brown + + tt = TextTilingTokenizer(demo_mode=True) + if text is None: + text = brown.raw()[:10000] + s, ss, d, b = tt.tokenize(text) + pylab.xlabel("Sentence Gap index") + pylab.ylabel("Gap Scores") + pylab.plot(range(len(s)), s, label="Gap Scores") + pylab.plot(range(len(ss)), ss, label="Smoothed Gap scores") + pylab.plot(range(len(d)), d, label="Depth scores") + pylab.stem(range(len(b)), b) + pylab.legend() + pylab.show() diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/treebank.py b/venv/lib/python3.10/site-packages/nltk/tokenize/treebank.py new file mode 100644 index 0000000000000000000000000000000000000000..e107f3838d965fd50270082efd4fe804ffcbe08d --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/treebank.py @@ -0,0 +1,402 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Michael Heilman (re-port from http://www.cis.upenn.edu/~treebank/tokenizer.sed) +# Tom Aarsen <> (modifications) +# +# URL: +# For license information, see LICENSE.TXT + +r""" + +Penn Treebank Tokenizer + +The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. +This implementation is a port of the tokenizer sed script written by Robert McIntyre +and available at http://www.cis.upenn.edu/~treebank/tokenizer.sed. +""" + +import re +import warnings +from typing import Iterator, List, Tuple + +from nltk.tokenize.api import TokenizerI +from nltk.tokenize.destructive import MacIntyreContractions +from nltk.tokenize.util import align_tokens + + +class TreebankWordTokenizer(TokenizerI): + r""" + The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. + + This tokenizer performs the following steps: + + - split standard contractions, e.g. ``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll`` + - treat most punctuation characters as separate tokens + - split off commas and single quotes, when followed by whitespace + - separate periods that appear at the end of line + + >>> from nltk.tokenize import TreebankWordTokenizer + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks.''' + >>> TreebankWordTokenizer().tokenize(s) + ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks', '.'] + >>> s = "They'll save and invest more." + >>> TreebankWordTokenizer().tokenize(s) + ['They', "'ll", 'save', 'and', 'invest', 'more', '.'] + >>> s = "hi, my name can't hello," + >>> TreebankWordTokenizer().tokenize(s) + ['hi', ',', 'my', 'name', 'ca', "n't", 'hello', ','] + """ + + # starting quotes + STARTING_QUOTES = [ + (re.compile(r"^\""), r"``"), + (re.compile(r"(``)"), r" \1 "), + (re.compile(r"([ \(\[{<])(\"|\'{2})"), r"\1 `` "), + ] + + # punctuation + PUNCTUATION = [ + (re.compile(r"([:,])([^\d])"), r" \1 \2"), + (re.compile(r"([:,])$"), r" \1 "), + (re.compile(r"\.\.\."), r" ... "), + (re.compile(r"[;@#$%&]"), r" \g<0> "), + ( + re.compile(r'([^\.])(\.)([\]\)}>"\']*)\s*$'), + r"\1 \2\3 ", + ), # Handles the final period. + (re.compile(r"[?!]"), r" \g<0> "), + (re.compile(r"([^'])' "), r"\1 ' "), + ] + + # Pads parentheses + PARENS_BRACKETS = (re.compile(r"[\]\[\(\)\{\}\<\>]"), r" \g<0> ") + + # Optionally: Convert parentheses, brackets and converts them to PTB symbols. 
+ CONVERT_PARENTHESES = [ + (re.compile(r"\("), "-LRB-"), + (re.compile(r"\)"), "-RRB-"), + (re.compile(r"\["), "-LSB-"), + (re.compile(r"\]"), "-RSB-"), + (re.compile(r"\{"), "-LCB-"), + (re.compile(r"\}"), "-RCB-"), + ] + + DOUBLE_DASHES = (re.compile(r"--"), r" -- ") + + # ending quotes + ENDING_QUOTES = [ + (re.compile(r"''"), " '' "), + (re.compile(r'"'), " '' "), + (re.compile(r"([^' ])('[sS]|'[mM]|'[dD]|') "), r"\1 \2 "), + (re.compile(r"([^' ])('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1 \2 "), + ] + + # List of contractions adapted from Robert MacIntyre's tokenizer. + _contractions = MacIntyreContractions() + CONTRACTIONS2 = list(map(re.compile, _contractions.CONTRACTIONS2)) + CONTRACTIONS3 = list(map(re.compile, _contractions.CONTRACTIONS3)) + + def tokenize( + self, text: str, convert_parentheses: bool = False, return_str: bool = False + ) -> List[str]: + r"""Return a tokenized copy of `text`. + + >>> from nltk.tokenize import TreebankWordTokenizer + >>> s = '''Good muffins cost $3.88 (roughly 3,36 euros)\nin New York. Please buy me\ntwo of them.\nThanks.''' + >>> TreebankWordTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3.88', '(', 'roughly', '3,36', + 'euros', ')', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', + 'of', 'them.', 'Thanks', '.'] + >>> TreebankWordTokenizer().tokenize(s, convert_parentheses=True) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3.88', '-LRB-', 'roughly', '3,36', + 'euros', '-RRB-', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', + 'of', 'them.', 'Thanks', '.'] + + :param text: A string with a sentence or sentences. + :type text: str + :param convert_parentheses: if True, replace parentheses to PTB symbols, + e.g. `(` to `-LRB-`. Defaults to False. + :type convert_parentheses: bool, optional + :param return_str: If True, return tokens as space-separated string, + defaults to False. + :type return_str: bool, optional + :return: List of tokens from `text`. + :rtype: List[str] + """ + if return_str is not False: + warnings.warn( + "Parameter 'return_str' has been deprecated and should no " + "longer be used.", + category=DeprecationWarning, + stacklevel=2, + ) + + for regexp, substitution in self.STARTING_QUOTES: + text = regexp.sub(substitution, text) + + for regexp, substitution in self.PUNCTUATION: + text = regexp.sub(substitution, text) + + # Handles parentheses. + regexp, substitution = self.PARENS_BRACKETS + text = regexp.sub(substitution, text) + # Optionally convert parentheses + if convert_parentheses: + for regexp, substitution in self.CONVERT_PARENTHESES: + text = regexp.sub(substitution, text) + + # Handles double dash. + regexp, substitution = self.DOUBLE_DASHES + text = regexp.sub(substitution, text) + + # add extra space to make things easier + text = " " + text + " " + + for regexp, substitution in self.ENDING_QUOTES: + text = regexp.sub(substitution, text) + + for regexp in self.CONTRACTIONS2: + text = regexp.sub(r" \1 \2 ", text) + for regexp in self.CONTRACTIONS3: + text = regexp.sub(r" \1 \2 ", text) + + # We are not using CONTRACTIONS4 since + # they are also commented out in the SED scripts + # for regexp in self._contractions.CONTRACTIONS4: + # text = regexp.sub(r' \1 \2 \3 ', text) + + return text.split() + + def span_tokenize(self, text: str) -> Iterator[Tuple[int, int]]: + r""" + Returns the spans of the tokens in ``text``. + Uses the post-hoc nltk.tokens.align_tokens to return the offset spans. 
+ + >>> from nltk.tokenize import TreebankWordTokenizer + >>> s = '''Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks).''' + >>> expected = [(0, 4), (5, 12), (13, 17), (18, 19), (19, 23), + ... (24, 26), (27, 30), (31, 32), (32, 36), (36, 37), (37, 38), + ... (40, 46), (47, 48), (48, 51), (51, 52), (53, 55), (56, 59), + ... (60, 62), (63, 68), (69, 70), (70, 76), (76, 77), (77, 78)] + >>> list(TreebankWordTokenizer().span_tokenize(s)) == expected + True + >>> expected = ['Good', 'muffins', 'cost', '$', '3.88', 'in', + ... 'New', '(', 'York', ')', '.', 'Please', '(', 'buy', ')', + ... 'me', 'two', 'of', 'them.', '(', 'Thanks', ')', '.'] + >>> [s[start:end] for start, end in TreebankWordTokenizer().span_tokenize(s)] == expected + True + + :param text: A string with a sentence or sentences. + :type text: str + :yield: Tuple[int, int] + """ + raw_tokens = self.tokenize(text) + + # Convert converted quotes back to original double quotes + # Do this only if original text contains double quote(s) or double + # single-quotes (because '' might be transformed to `` if it is + # treated as starting quotes). + if ('"' in text) or ("''" in text): + # Find double quotes and converted quotes + matched = [m.group() for m in re.finditer(r"``|'{2}|\"", text)] + + # Replace converted quotes back to double quotes + tokens = [ + matched.pop(0) if tok in ['"', "``", "''"] else tok + for tok in raw_tokens + ] + else: + tokens = raw_tokens + + yield from align_tokens(tokens, text) + + +class TreebankWordDetokenizer(TokenizerI): + r""" + The Treebank detokenizer uses the reverse regex operations corresponding to + the Treebank tokenizer's regexes. + + Note: + + - There're additional assumption mades when undoing the padding of ``[;@#$%&]`` + punctuation symbols that isn't presupposed in the TreebankTokenizer. + - There're additional regexes added in reversing the parentheses tokenization, + such as the ``r'([\]\)\}\>])\s([:;,.])'``, which removes the additional right + padding added to the closing parentheses precedding ``[:;,.]``. + - It's not possible to return the original whitespaces as they were because + there wasn't explicit records of where `'\n'`, `'\t'` or `'\s'` were removed at + the text.split() operation. + + >>> from nltk.tokenize.treebank import TreebankWordTokenizer, TreebankWordDetokenizer + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks.''' + >>> d = TreebankWordDetokenizer() + >>> t = TreebankWordTokenizer() + >>> toks = t.tokenize(s) + >>> d.detokenize(toks) + 'Good muffins cost $3.88 in New York. Please buy me two of them. Thanks.' + + The MXPOST parentheses substitution can be undone using the ``convert_parentheses`` + parameter: + + >>> s = '''Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks).''' + >>> expected_tokens = ['Good', 'muffins', 'cost', '$', '3.88', 'in', + ... 'New', '-LRB-', 'York', '-RRB-', '.', 'Please', '-LRB-', 'buy', + ... '-RRB-', 'me', 'two', 'of', 'them.', '-LRB-', 'Thanks', '-RRB-', '.'] + >>> expected_tokens == t.tokenize(s, convert_parentheses=True) + True + >>> expected_detoken = 'Good muffins cost $3.88 in New (York). Please (buy) me two of them. (Thanks).' + >>> expected_detoken == d.detokenize(t.tokenize(s, convert_parentheses=True), convert_parentheses=True) + True + + During tokenization it's safe to add more spaces but during detokenization, + simply undoing the padding doesn't really help. 
+ + - During tokenization, left and right pad is added to ``[!?]``, when + detokenizing, only left shift the ``[!?]`` is needed. + Thus ``(re.compile(r'\s([?!])'), r'\g<1>')``. + + - During tokenization ``[:,]`` are left and right padded but when detokenizing, + only left shift is necessary and we keep right pad after comma/colon + if the string after is a non-digit. + Thus ``(re.compile(r'\s([:,])\s([^\d])'), r'\1 \2')``. + + >>> from nltk.tokenize.treebank import TreebankWordDetokenizer + >>> toks = ['hello', ',', 'i', 'ca', "n't", 'feel', 'my', 'feet', '!', 'Help', '!', '!'] + >>> twd = TreebankWordDetokenizer() + >>> twd.detokenize(toks) + "hello, i can't feel my feet! Help!!" + + >>> toks = ['hello', ',', 'i', "can't", 'feel', ';', 'my', 'feet', '!', + ... 'Help', '!', '!', 'He', 'said', ':', 'Help', ',', 'help', '?', '!'] + >>> twd.detokenize(toks) + "hello, i can't feel; my feet! Help!! He said: Help, help?!" + """ + + _contractions = MacIntyreContractions() + CONTRACTIONS2 = [ + re.compile(pattern.replace("(?#X)", r"\s")) + for pattern in _contractions.CONTRACTIONS2 + ] + CONTRACTIONS3 = [ + re.compile(pattern.replace("(?#X)", r"\s")) + for pattern in _contractions.CONTRACTIONS3 + ] + + # ending quotes + ENDING_QUOTES = [ + (re.compile(r"([^' ])\s('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1\2 "), + (re.compile(r"([^' ])\s('[sS]|'[mM]|'[dD]|') "), r"\1\2 "), + (re.compile(r"(\S)\s(\'\')"), r"\1\2"), + ( + re.compile(r"(\'\')\s([.,:)\]>};%])"), + r"\1\2", + ), # Quotes followed by no-left-padded punctuations. + (re.compile(r"''"), '"'), + ] + + # Handles double dashes + DOUBLE_DASHES = (re.compile(r" -- "), r"--") + + # Optionally: Convert parentheses, brackets and converts them from PTB symbols. + CONVERT_PARENTHESES = [ + (re.compile("-LRB-"), "("), + (re.compile("-RRB-"), ")"), + (re.compile("-LSB-"), "["), + (re.compile("-RSB-"), "]"), + (re.compile("-LCB-"), "{"), + (re.compile("-RCB-"), "}"), + ] + + # Undo padding on parentheses. + PARENS_BRACKETS = [ + (re.compile(r"([\[\(\{\<])\s"), r"\g<1>"), + (re.compile(r"\s([\]\)\}\>])"), r"\g<1>"), + (re.compile(r"([\]\)\}\>])\s([:;,.])"), r"\1\2"), + ] + + # punctuation + PUNCTUATION = [ + (re.compile(r"([^'])\s'\s"), r"\1' "), + (re.compile(r"\s([?!])"), r"\g<1>"), # Strip left pad for [?!] + # (re.compile(r'\s([?!])\s'), r'\g<1>'), + (re.compile(r'([^\.])\s(\.)([\]\)}>"\']*)\s*$'), r"\1\2\3"), + # When tokenizing, [;@#$%&] are padded with whitespace regardless of + # whether there are spaces before or after them. + # But during detokenization, we need to distinguish between left/right + # pad, so we split this up. + (re.compile(r"([#$])\s"), r"\g<1>"), # Left pad. + (re.compile(r"\s([;%])"), r"\g<1>"), # Right pad. + # (re.compile(r"\s([&*])\s"), r" \g<1> "), # Unknown pad. + (re.compile(r"\s\.\.\.\s"), r"..."), + # (re.compile(r"\s([:,])\s$"), r"\1"), # .strip() takes care of it. + ( + re.compile(r"\s([:,])"), + r"\1", + ), # Just remove left padding. Punctuation in numbers won't be padded. + ] + + # starting quotes + STARTING_QUOTES = [ + (re.compile(r"([ (\[{<])\s``"), r"\1``"), + (re.compile(r"(``)\s"), r"\1"), + (re.compile(r"``"), r'"'), + ] + + def tokenize(self, tokens: List[str], convert_parentheses: bool = False) -> str: + """ + Treebank detokenizer, created by undoing the regexes from + the TreebankWordTokenizer.tokenize. + + :param tokens: A list of strings, i.e. tokenized text. + :type tokens: List[str] + :param convert_parentheses: if True, replace PTB symbols with parentheses, + e.g. `-LRB-` to `(`. Defaults to False. 
+ :type convert_parentheses: bool, optional + :return: str + """ + text = " ".join(tokens) + + # Add extra space to make things easier + text = " " + text + " " + + # Reverse the contractions regexes. + # Note: CONTRACTIONS4 are not used in tokenization. + for regexp in self.CONTRACTIONS3: + text = regexp.sub(r"\1\2", text) + for regexp in self.CONTRACTIONS2: + text = regexp.sub(r"\1\2", text) + + # Reverse the regexes applied for ending quotes. + for regexp, substitution in self.ENDING_QUOTES: + text = regexp.sub(substitution, text) + + # Undo the space padding. + text = text.strip() + + # Reverse the padding on double dashes. + regexp, substitution = self.DOUBLE_DASHES + text = regexp.sub(substitution, text) + + if convert_parentheses: + for regexp, substitution in self.CONVERT_PARENTHESES: + text = regexp.sub(substitution, text) + + # Reverse the padding regexes applied for parenthesis/brackets. + for regexp, substitution in self.PARENS_BRACKETS: + text = regexp.sub(substitution, text) + + # Reverse the regexes applied for punctuations. + for regexp, substitution in self.PUNCTUATION: + text = regexp.sub(substitution, text) + + # Reverse the regexes applied for starting quotes. + for regexp, substitution in self.STARTING_QUOTES: + text = regexp.sub(substitution, text) + + return text.strip() + + def detokenize(self, tokens: List[str], convert_parentheses: bool = False) -> str: + """Duck-typing the abstract *tokenize()*.""" + return self.tokenize(tokens, convert_parentheses) diff --git a/venv/lib/python3.10/site-packages/nltk/twitter/__init__.py b/venv/lib/python3.10/site-packages/nltk/twitter/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cd14ffb4703bf38bb349cc19cca2d97b6df29f77 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/twitter/__init__.py @@ -0,0 +1,35 @@ +# Natural Language Toolkit: Twitter +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK Twitter Package + +This package contains classes for retrieving Tweet documents using the +Twitter API. + +""" +try: + import twython +except ImportError: + import warnings + + warnings.warn( + "The twython library has not been installed. " + "Some functionality from the twitter package will not be available." 
+ ) +else: + from nltk.twitter.util import Authenticate, credsfromfile + from nltk.twitter.twitterclient import ( + Streamer, + Query, + Twitter, + TweetViewer, + TweetWriter, + ) + + +from nltk.twitter.common import json2csv diff --git a/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb35e8301b90acb580103af149f713b0cde6cc8f Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4554b9d87c73d0b3c3cf6327aad74bebfcc55b49 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88841ebba4a6bc879e9c407cfb916183f3226e6a Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/twitter/api.py b/venv/lib/python3.10/site-packages/nltk/twitter/api.py new file mode 100644 index 0000000000000000000000000000000000000000..71248b176340abd0d0d7d51e8ed68700f7948e13 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/twitter/api.py @@ -0,0 +1,145 @@ +# Natural Language Toolkit: Twitter API +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# Lorenzo Rubio +# URL: +# For license information, see LICENSE.TXT + +""" +This module provides an interface for TweetHandlers, and support for timezone +handling. +""" + +import time as _time +from abc import ABCMeta, abstractmethod +from datetime import datetime, timedelta, timezone, tzinfo + + +class LocalTimezoneOffsetWithUTC(tzinfo): + """ + This is not intended to be a general purpose class for dealing with the + local timezone. In particular: + + * it assumes that the date passed has been created using + `datetime(..., tzinfo=Local)`, where `Local` is an instance of + the object `LocalTimezoneOffsetWithUTC`; + * for such an object, it returns the offset with UTC, used for date comparisons. + + Reference: https://docs.python.org/3/library/datetime.html + """ + + STDOFFSET = timedelta(seconds=-_time.timezone) + + if _time.daylight: + DSTOFFSET = timedelta(seconds=-_time.altzone) + else: + DSTOFFSET = STDOFFSET + + def utcoffset(self, dt): + """ + Access the relevant time offset. + """ + return self.DSTOFFSET + + +LOCAL = LocalTimezoneOffsetWithUTC() + + +class BasicTweetHandler(metaclass=ABCMeta): + """ + Minimal implementation of `TweetHandler`. + + Counts the number of Tweets and decides when the client should stop + fetching them. + """ + + def __init__(self, limit=20): + self.limit = limit + self.counter = 0 + + """ + A flag to indicate to the client whether to stop fetching data given + some condition (e.g., reaching a date limit). + """ + self.do_stop = False + + """ + Stores the id of the last fetched Tweet to handle pagination. + """ + self.max_id = None + + def do_continue(self): + """ + Returns `False` if the client should stop fetching Tweets. 
+ """ + return self.counter < self.limit and not self.do_stop + + +class TweetHandlerI(BasicTweetHandler): + """ + Interface class whose subclasses should implement a handle method that + Twitter clients can delegate to. + """ + + def __init__(self, limit=20, upper_date_limit=None, lower_date_limit=None): + """ + :param int limit: The number of data items to process in the current\ + round of processing. + + :param tuple upper_date_limit: The date at which to stop collecting\ + new data. This should be entered as a tuple which can serve as the\ + argument to `datetime.datetime`.\ + E.g. `date_limit=(2015, 4, 1, 12, 40)` for 12:30 pm on April 1 2015. + + :param tuple lower_date_limit: The date at which to stop collecting\ + new data. See `upper_data_limit` for formatting. + """ + BasicTweetHandler.__init__(self, limit) + + self.upper_date_limit = None + self.lower_date_limit = None + if upper_date_limit: + self.upper_date_limit = datetime(*upper_date_limit, tzinfo=LOCAL) + if lower_date_limit: + self.lower_date_limit = datetime(*lower_date_limit, tzinfo=LOCAL) + + self.startingup = True + + @abstractmethod + def handle(self, data): + """ + Deal appropriately with data returned by the Twitter API + """ + + @abstractmethod + def on_finish(self): + """ + Actions when the tweet limit has been reached + """ + + def check_date_limit(self, data, verbose=False): + """ + Validate date limits. + """ + if self.upper_date_limit or self.lower_date_limit: + date_fmt = "%a %b %d %H:%M:%S +0000 %Y" + tweet_date = datetime.strptime(data["created_at"], date_fmt).replace( + tzinfo=timezone.utc + ) + if (self.upper_date_limit and tweet_date > self.upper_date_limit) or ( + self.lower_date_limit and tweet_date < self.lower_date_limit + ): + if self.upper_date_limit: + message = "earlier" + date_limit = self.upper_date_limit + else: + message = "later" + date_limit = self.lower_date_limit + if verbose: + print( + "Date limit {} is {} than date of current tweet {}".format( + date_limit, message, tweet_date + ) + ) + self.do_stop = True diff --git a/venv/lib/python3.10/site-packages/nltk/twitter/common.py b/venv/lib/python3.10/site-packages/nltk/twitter/common.py new file mode 100644 index 0000000000000000000000000000000000000000..d9428724cfa8cae69e14d899cb73eee5607475d0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/twitter/common.py @@ -0,0 +1,270 @@ +# Natural Language Toolkit: Twitter client +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# Lorenzo Rubio +# URL: +# For license information, see LICENSE.TXT + +""" +Utility functions for the `twitterclient` module which do not require +the `twython` library to have been installed. +""" +import csv +import gzip +import json + +from nltk.internals import deprecated + +HIER_SEPARATOR = "." + + +def extract_fields(tweet, fields): + """ + Extract field values from a full tweet and return them as a list + + :param json tweet: The tweet in JSON format + :param list fields: The fields to be extracted from the tweet + :rtype: list(str) + """ + out = [] + for field in fields: + try: + _add_field_to_out(tweet, field, out) + except TypeError as e: + raise RuntimeError( + "Fatal error when extracting fields. 
Cannot find field ", field + ) from e + return out + + +def _add_field_to_out(json, field, out): + if _is_composed_key(field): + key, value = _get_key_value_composed(field) + _add_field_to_out(json[key], value, out) + else: + out += [json[field]] + + +def _is_composed_key(field): + return HIER_SEPARATOR in field + + +def _get_key_value_composed(field): + out = field.split(HIER_SEPARATOR) + # there could be up to 3 levels + key = out[0] + value = HIER_SEPARATOR.join(out[1:]) + return key, value + + +def _get_entity_recursive(json, entity): + if not json: + return None + elif isinstance(json, dict): + for key, value in json.items(): + if key == entity: + return value + # 'entities' and 'extended_entities' are wrappers in Twitter json + # structure that contain other Twitter objects. See: + # https://dev.twitter.com/overview/api/entities-in-twitter-objects + + if key == "entities" or key == "extended_entities": + candidate = _get_entity_recursive(value, entity) + if candidate is not None: + return candidate + return None + elif isinstance(json, list): + for item in json: + candidate = _get_entity_recursive(item, entity) + if candidate is not None: + return candidate + return None + else: + return None + + +def json2csv( + fp, outfile, fields, encoding="utf8", errors="replace", gzip_compress=False +): + """ + Extract selected fields from a file of line-separated JSON tweets and + write to a file in CSV format. + + This utility function allows a file of full tweets to be easily converted + to a CSV file for easier processing. For example, just TweetIDs or + just the text content of the Tweets can be extracted. + + Additionally, the function allows combinations of fields of other Twitter + objects (mainly the users, see below). + + For Twitter entities (e.g. hashtags of a Tweet), and for geolocation, see + `json2csv_entities` + + :param str infile: The name of the file containing full tweets + + :param str outfile: The name of the text file where results should be\ + written + + :param list fields: The list of fields to be extracted. Useful examples\ + are 'id_str' for the tweetID and 'text' for the text of the tweet. See\ + for a full list of fields.\ + e. g.: ['id_str'], ['id', 'text', 'favorite_count', 'retweet_count']\ + Additionally, it allows IDs from other Twitter objects, e. 
g.,\ + ['id', 'text', 'user.id', 'user.followers_count', 'user.friends_count'] + + :param error: Behaviour for encoding errors, see\ + https://docs.python.org/3/library/codecs.html#codec-base-classes + + :param gzip_compress: if `True`, output files are compressed with gzip + """ + (writer, outf) = _outf_writer(outfile, encoding, errors, gzip_compress) + # write the list of fields as header + writer.writerow(fields) + # process the file + for line in fp: + tweet = json.loads(line) + row = extract_fields(tweet, fields) + writer.writerow(row) + outf.close() + + +@deprecated("Use open() and csv.writer() directly instead.") +def outf_writer_compat(outfile, encoding, errors, gzip_compress=False): + """Get a CSV writer with optional compression.""" + return _outf_writer(outfile, encoding, errors, gzip_compress) + + +def _outf_writer(outfile, encoding, errors, gzip_compress=False): + if gzip_compress: + outf = gzip.open(outfile, "wt", newline="", encoding=encoding, errors=errors) + else: + outf = open(outfile, "w", newline="", encoding=encoding, errors=errors) + writer = csv.writer(outf) + return (writer, outf) + + +def json2csv_entities( + tweets_file, + outfile, + main_fields, + entity_type, + entity_fields, + encoding="utf8", + errors="replace", + gzip_compress=False, +): + """ + Extract selected fields from a file of line-separated JSON tweets and + write to a file in CSV format. + + This utility function allows a file of full Tweets to be easily converted + to a CSV file for easier processing of Twitter entities. For example, the + hashtags or media elements of a tweet can be extracted. + + It returns one line per entity of a Tweet, e.g. if a tweet has two hashtags + there will be two lines in the output file, one per hashtag + + :param tweets_file: the file-like object containing full Tweets + + :param str outfile: The path of the text file where results should be\ + written + + :param list main_fields: The list of fields to be extracted from the main\ + object, usually the tweet. Useful examples: 'id_str' for the tweetID. See\ + for a full list of fields. + e. g.: ['id_str'], ['id', 'text', 'favorite_count', 'retweet_count'] + If `entity_type` is expressed with hierarchy, then it is the list of\ + fields of the object that corresponds to the key of the entity_type,\ + (e.g., for entity_type='user.urls', the fields in the main_fields list\ + belong to the user object; for entity_type='place.bounding_box', the\ + files in the main_field list belong to the place object of the tweet). + + :param list entity_type: The name of the entity: 'hashtags', 'media',\ + 'urls' and 'user_mentions' for the tweet object. For a user object,\ + this needs to be expressed with a hierarchy: `'user.urls'`. For the\ + bounding box of the Tweet location, use `'place.bounding_box'`. + + :param list entity_fields: The list of fields to be extracted from the\ + entity. E.g. 
`['text']` (of the Tweet) + + :param error: Behaviour for encoding errors, see\ + https://docs.python.org/3/library/codecs.html#codec-base-classes + + :param gzip_compress: if `True`, output files are compressed with gzip + """ + + (writer, outf) = _outf_writer(outfile, encoding, errors, gzip_compress) + header = get_header_field_list(main_fields, entity_type, entity_fields) + writer.writerow(header) + for line in tweets_file: + tweet = json.loads(line) + if _is_composed_key(entity_type): + key, value = _get_key_value_composed(entity_type) + object_json = _get_entity_recursive(tweet, key) + if not object_json: + # this can happen in the case of "place" + continue + object_fields = extract_fields(object_json, main_fields) + items = _get_entity_recursive(object_json, value) + _write_to_file(object_fields, items, entity_fields, writer) + else: + tweet_fields = extract_fields(tweet, main_fields) + items = _get_entity_recursive(tweet, entity_type) + _write_to_file(tweet_fields, items, entity_fields, writer) + outf.close() + + +def get_header_field_list(main_fields, entity_type, entity_fields): + if _is_composed_key(entity_type): + key, value = _get_key_value_composed(entity_type) + main_entity = key + sub_entity = value + else: + main_entity = None + sub_entity = entity_type + + if main_entity: + output1 = [HIER_SEPARATOR.join([main_entity, x]) for x in main_fields] + else: + output1 = main_fields + output2 = [HIER_SEPARATOR.join([sub_entity, x]) for x in entity_fields] + return output1 + output2 + + +def _write_to_file(object_fields, items, entity_fields, writer): + if not items: + # it could be that the entity is just not present for the tweet + # e.g. tweet hashtag is always present, even as [], however + # tweet media may not be present + return + if isinstance(items, dict): + # this happens e.g. for "place" of a tweet + row = object_fields + # there might be composed keys in de list of required fields + entity_field_values = [x for x in entity_fields if not _is_composed_key(x)] + entity_field_composed = [x for x in entity_fields if _is_composed_key(x)] + for field in entity_field_values: + value = items[field] + if isinstance(value, list): + row += value + else: + row += [value] + # now check required dictionaries + for d in entity_field_composed: + kd, vd = _get_key_value_composed(d) + json_dict = items[kd] + if not isinstance(json_dict, dict): + raise RuntimeError( + """Key {} does not contain a dictionary + in the json file""".format( + kd + ) + ) + row += [json_dict[vd]] + writer.writerow(row) + return + # in general it is a list + for item in items: + row = object_fields + extract_fields(item, entity_fields) + writer.writerow(row) diff --git a/venv/lib/python3.10/site-packages/nltk/twitter/twitter_demo.py b/venv/lib/python3.10/site-packages/nltk/twitter/twitter_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..554bdfef511190b28504f9ded8dc8a6098e16ed9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/twitter/twitter_demo.py @@ -0,0 +1,306 @@ +# Natural Language Toolkit: Twitter client +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# Lorenzo Rubio +# URL: +# For license information, see LICENSE.TXT + +""" +Examples to demo the :py:mod:`twitterclient` code. + +These demo functions should all run, with the following caveats: + +* You must have obtained API keys from Twitter, and installed them according to + the instructions in the `twitter HOWTO `_. 
+ +* If you are on a slow network, some of the calls to the Twitter API may + timeout. + +* If you are being rate limited while searching, you will receive a 420 + error response. + +* Your terminal window / console must be able to display UTF-8 encoded characters. + +For documentation about the Twitter APIs, see `The Streaming APIs Overview +`_ and `The REST APIs Overview +`_. + +For error codes see Twitter's +`Error Codes and Responses ` +""" + +import datetime +import json +from functools import wraps +from io import StringIO + +from nltk.twitter import ( + Query, + Streamer, + TweetViewer, + TweetWriter, + Twitter, + credsfromfile, +) + +SPACER = "###################################" + + +def verbose(func): + """Decorator for demo functions""" + + @wraps(func) + def with_formatting(*args, **kwargs): + print() + print(SPACER) + print("Using %s" % (func.__name__)) + print(SPACER) + return func(*args, **kwargs) + + return with_formatting + + +def yesterday(): + """ + Get yesterday's datetime as a 5-tuple. + """ + date = datetime.datetime.now() + date -= datetime.timedelta(days=1) + date_tuple = date.timetuple()[:6] + return date_tuple + + +def setup(): + """ + Initialize global variables for the demos. + """ + global USERIDS, FIELDS + + USERIDS = ["759251", "612473", "15108702", "6017542", "2673523800"] + # UserIDs corresponding to\ + # @CNN, @BBCNews, @ReutersLive, @BreakingNews, @AJELive + FIELDS = ["id_str"] + + +@verbose +def twitterclass_demo(): + """ + Use the simplified :class:`Twitter` class to write some tweets to a file. + """ + tw = Twitter() + print("Track from the public stream\n") + tw.tweets(keywords="love, hate", limit=10) # public stream + print(SPACER) + print("Search past Tweets\n") + tw = Twitter() + tw.tweets(keywords="love, hate", stream=False, limit=10) # search past tweets + print(SPACER) + print( + "Follow two accounts in the public stream" + + " -- be prepared to wait a few minutes\n" + ) + tw = Twitter() + tw.tweets(follow=["759251", "6017542"], stream=True, limit=5) # public stream + + +@verbose +def sampletoscreen_demo(limit=20): + """ + Sample from the Streaming API and send output to terminal. + """ + oauth = credsfromfile() + client = Streamer(**oauth) + client.register(TweetViewer(limit=limit)) + client.sample() + + +@verbose +def tracktoscreen_demo(track="taylor swift", limit=10): + """ + Track keywords from the public Streaming API and send output to terminal. + """ + oauth = credsfromfile() + client = Streamer(**oauth) + client.register(TweetViewer(limit=limit)) + client.filter(track=track) + + +@verbose +def search_demo(keywords="nltk"): + """ + Use the REST API to search for past tweets containing a given keyword. + """ + oauth = credsfromfile() + client = Query(**oauth) + for tweet in client.search_tweets(keywords=keywords, limit=10): + print(tweet["text"]) + + +@verbose +def tweets_by_user_demo(user="NLTK_org", count=200): + """ + Use the REST API to search for past tweets by a given user. + """ + oauth = credsfromfile() + client = Query(**oauth) + client.register(TweetWriter()) + client.user_tweets(user, count) + + +@verbose +def lookup_by_userid_demo(): + """ + Use the REST API to convert a userID to a screen name. 
+ """ + oauth = credsfromfile() + client = Query(**oauth) + user_info = client.user_info_from_id(USERIDS) + for info in user_info: + name = info["screen_name"] + followers = info["followers_count"] + following = info["friends_count"] + print(f"{name}, followers: {followers}, following: {following}") + + +@verbose +def followtoscreen_demo(limit=10): + """ + Using the Streaming API, select just the tweets from a specified list of + userIDs. + + This is will only give results in a reasonable time if the users in + question produce a high volume of tweets, and may even so show some delay. + """ + oauth = credsfromfile() + client = Streamer(**oauth) + client.register(TweetViewer(limit=limit)) + client.statuses.filter(follow=USERIDS) + + +@verbose +def streamtofile_demo(limit=20): + """ + Write 20 tweets sampled from the public Streaming API to a file. + """ + oauth = credsfromfile() + client = Streamer(**oauth) + client.register(TweetWriter(limit=limit, repeat=False)) + client.statuses.sample() + + +@verbose +def limit_by_time_demo(keywords="nltk"): + """ + Query the REST API for Tweets about NLTK since yesterday and send + the output to terminal. + + This example makes the assumption that there are sufficient Tweets since + yesterday for the date to be an effective cut-off. + """ + date = yesterday() + dt_date = datetime.datetime(*date) + oauth = credsfromfile() + client = Query(**oauth) + client.register(TweetViewer(limit=100, lower_date_limit=date)) + + print(f"Cutoff date: {dt_date}\n") + + for tweet in client.search_tweets(keywords=keywords): + print("{} ".format(tweet["created_at"]), end="") + client.handler.handle(tweet) + + +@verbose +def corpusreader_demo(): + """ + Use `TwitterCorpusReader` tp read a file of tweets, and print out + + * some full tweets in JSON format; + * some raw strings from the tweets (i.e., the value of the `text` field); and + * the result of tokenising the raw strings. + + """ + from nltk.corpus import twitter_samples as tweets + + print() + print("Complete tweet documents") + print(SPACER) + for tweet in tweets.docs("tweets.20150430-223406.json")[:1]: + print(json.dumps(tweet, indent=1, sort_keys=True)) + + print() + print("Raw tweet strings:") + print(SPACER) + for text in tweets.strings("tweets.20150430-223406.json")[:15]: + print(text) + + print() + print("Tokenized tweet strings:") + print(SPACER) + for toks in tweets.tokenized("tweets.20150430-223406.json")[:15]: + print(toks) + + +@verbose +def expand_tweetids_demo(): + """ + Given a file object containing a list of Tweet IDs, fetch the + corresponding full Tweets, if available. + + """ + ids_f = StringIO( + """\ + 588665495492124672 + 588665495487909888 + 588665495508766721 + 588665495513006080 + 588665495517200384 + 588665495487811584 + 588665495525588992 + 588665495487844352 + 588665495492014081 + 588665495512948737""" + ) + oauth = credsfromfile() + client = Query(**oauth) + hydrated = client.expand_tweetids(ids_f) + + for tweet in hydrated: + id_str = tweet["id_str"] + print(f"id: {id_str}") + text = tweet["text"] + if text.startswith("@null"): + text = "[Tweet not available]" + print(text + "\n") + + +ALL = [ + twitterclass_demo, + sampletoscreen_demo, + tracktoscreen_demo, + search_demo, + tweets_by_user_demo, + lookup_by_userid_demo, + followtoscreen_demo, + streamtofile_demo, + limit_by_time_demo, + corpusreader_demo, + expand_tweetids_demo, +] + +""" +Select demo functions to run. E.g. replace the following line with "DEMOS = +ALL[8:]" to execute only the final three demos. 
+""" +DEMOS = ALL[:] + +if __name__ == "__main__": + setup() + + for demo in DEMOS: + demo() + + print("\n" + SPACER) + print("All demos completed") + print(SPACER) diff --git a/venv/lib/python3.10/site-packages/nltk/twitter/twitterclient.py b/venv/lib/python3.10/site-packages/nltk/twitter/twitterclient.py new file mode 100644 index 0000000000000000000000000000000000000000..d556738e0849faf35454166cec8a5949fcca93dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/twitter/twitterclient.py @@ -0,0 +1,564 @@ +# Natural Language Toolkit: Twitter client +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# Lorenzo Rubio +# URL: +# For license information, see LICENSE.TXT + + +""" +NLTK Twitter client + +This module offers methods for collecting and processing Tweets. Most of the +functionality depends on access to the Twitter APIs, and this is handled via +the third party Twython library. + +If one of the methods below returns an integer, it is probably a `Twitter +error code `_. For +example, the response of '420' means that you have reached the limit of the +requests you can currently make to the Twitter API. Currently, `rate limits +for the search API `_ are +divided into 15 minute windows. +""" + +import datetime +import gzip +import itertools +import json +import os +import time + +import requests +from twython import Twython, TwythonStreamer +from twython.exceptions import TwythonError, TwythonRateLimitError + +from nltk.twitter.api import BasicTweetHandler, TweetHandlerI +from nltk.twitter.util import credsfromfile, guess_path + + +class Streamer(TwythonStreamer): + """ + Retrieve data from the Twitter Streaming API. + + The streaming API requires + `OAuth 1.0 `_ authentication. + """ + + def __init__(self, app_key, app_secret, oauth_token, oauth_token_secret): + + self.handler = None + self.do_continue = True + TwythonStreamer.__init__( + self, app_key, app_secret, oauth_token, oauth_token_secret + ) + + def register(self, handler): + """ + Register a method for handling Tweets. + + :param TweetHandlerI handler: method for viewing + """ + self.handler = handler + + def on_success(self, data): + """ + :param data: response from Twitter API + """ + if self.do_continue: + if self.handler is not None: + if "text" in data: + self.handler.counter += 1 + self.handler.handle(data) + self.do_continue = self.handler.do_continue() + else: + raise ValueError("No data handler has been registered.") + else: + self.disconnect() + self.handler.on_finish() + + def on_error(self, status_code, data): + """ + :param status_code: The status code returned by the Twitter API + :param data: The response from Twitter API + + """ + print(status_code) + + def sample(self): + """ + Wrapper for 'statuses / sample' API call + """ + while self.do_continue: + + # Stream in an endless loop until limit is reached. 
See twython + # issue 288: https://github.com/ryanmcgrath/twython/issues/288 + # colditzjb commented on 9 Dec 2014 + + try: + self.statuses.sample() + except requests.exceptions.ChunkedEncodingError as e: + if e is not None: + print(f"Error (stream will continue): {e}") + continue + + def filter(self, track="", follow="", lang="en"): + """ + Wrapper for 'statuses / filter' API call + """ + while self.do_continue: + # Stream in an endless loop until limit is reached + + try: + if track == "" and follow == "": + msg = "Please supply a value for 'track', 'follow'" + raise ValueError(msg) + self.statuses.filter(track=track, follow=follow, lang=lang) + except requests.exceptions.ChunkedEncodingError as e: + if e is not None: + print(f"Error (stream will continue): {e}") + continue + + +class Query(Twython): + """ + Retrieve data from the Twitter REST API. + """ + + def __init__(self, app_key, app_secret, oauth_token, oauth_token_secret): + """ + :param app_key: (optional) Your applications key + :param app_secret: (optional) Your applications secret key + :param oauth_token: (optional) When using **OAuth 1**, combined with + oauth_token_secret to make authenticated calls + :param oauth_token_secret: (optional) When using **OAuth 1** combined + with oauth_token to make authenticated calls + """ + self.handler = None + self.do_continue = True + Twython.__init__(self, app_key, app_secret, oauth_token, oauth_token_secret) + + def register(self, handler): + """ + Register a method for handling Tweets. + + :param TweetHandlerI handler: method for viewing or writing Tweets to a file. + """ + self.handler = handler + + def expand_tweetids(self, ids_f, verbose=True): + """ + Given a file object containing a list of Tweet IDs, fetch the + corresponding full Tweets from the Twitter API. + + The API call `statuses/lookup` will fail to retrieve a Tweet if the + user has deleted it. + + This call to the Twitter API is rate-limited. See + for details. + + :param ids_f: input file object consisting of Tweet IDs, one to a line + :return: iterable of Tweet objects in JSON format + """ + ids = [line.strip() for line in ids_f if line] + + if verbose: + print(f"Counted {len(ids)} Tweet IDs in {ids_f}.") + + # The Twitter endpoint takes lists of up to 100 ids, so we chunk the + # ids. + id_chunks = [ids[i : i + 100] for i in range(0, len(ids), 100)] + + chunked_tweets = (self.lookup_status(id=chunk) for chunk in id_chunks) + + return itertools.chain.from_iterable(chunked_tweets) + + def _search_tweets(self, keywords, limit=100, lang="en"): + """ + Assumes that the handler has been informed. Fetches Tweets from + search_tweets generator output and passses them to handler + + :param str keywords: A list of query terms to search for, written as\ + a comma-separated string. + :param int limit: Number of Tweets to process + :param str lang: language + """ + while True: + tweets = self.search_tweets( + keywords=keywords, limit=limit, lang=lang, max_id=self.handler.max_id + ) + for tweet in tweets: + self.handler.handle(tweet) + if not (self.handler.do_continue() and self.handler.repeat): + break + self.handler.on_finish() + + def search_tweets( + self, + keywords, + limit=100, + lang="en", + max_id=None, + retries_after_twython_exception=0, + ): + """ + Call the REST API ``'search/tweets'`` endpoint with some plausible + defaults. See `the Twitter search documentation + `_ for more information + about admissible search parameters. 
+ + :param str keywords: A list of query terms to search for, written as\ + a comma-separated string + :param int limit: Number of Tweets to process + :param str lang: language + :param int max_id: id of the last tweet fetched + :param int retries_after_twython_exception: number of retries when\ + searching Tweets before raising an exception + :rtype: python generator + """ + if not self.handler: + # if no handler is provided, `BasicTweetHandler` provides minimum + # functionality for limiting the number of Tweets retrieved + self.handler = BasicTweetHandler(limit=limit) + + count_from_query = 0 + if max_id: + self.handler.max_id = max_id + else: + results = self.search( + q=keywords, count=min(100, limit), lang=lang, result_type="recent" + ) + count = len(results["statuses"]) + if count == 0: + print("No Tweets available through REST API for those keywords") + return + count_from_query = count + self.handler.max_id = results["statuses"][count - 1]["id"] - 1 + + for result in results["statuses"]: + yield result + self.handler.counter += 1 + if self.handler.do_continue() == False: + return + + # Pagination loop: keep fetching Tweets until the desired count is + # reached while dealing with Twitter rate limits. + retries = 0 + while count_from_query < limit: + try: + mcount = min(100, limit - count_from_query) + results = self.search( + q=keywords, + count=mcount, + lang=lang, + max_id=self.handler.max_id, + result_type="recent", + ) + except TwythonRateLimitError as e: + print(f"Waiting for 15 minutes -{e}") + time.sleep(15 * 60) # wait 15 minutes + continue + except TwythonError as e: + print(f"Fatal error in Twython request -{e}") + if retries_after_twython_exception == retries: + raise e + retries += 1 + + count = len(results["statuses"]) + if count == 0: + print("No more Tweets available through rest api") + return + count_from_query += count + # the max_id is also present in the Tweet metadata + # results['search_metadata']['next_results'], but as part of a + # query and difficult to fetch. This is doing the equivalent + # (last tweet id minus one) + self.handler.max_id = results["statuses"][count - 1]["id"] - 1 + + for result in results["statuses"]: + yield result + self.handler.counter += 1 + if self.handler.do_continue() == False: + return + + def user_info_from_id(self, userids): + """ + Convert a list of userIDs into a variety of information about the users. + + See . + + :param list userids: A list of integer strings corresponding to Twitter userIDs + :rtype: list(json) + """ + return [self.show_user(user_id=userid) for userid in userids] + + def user_tweets(self, screen_name, limit, include_rts="false"): + """ + Return a collection of the most recent Tweets posted by the user + + :param str user: The user's screen name; the initial '@' symbol\ + should be omitted + :param int limit: The number of Tweets to recover; 200 is the maximum allowed + :param str include_rts: Whether to include statuses which have been\ + retweeted by the user; possible values are 'true' and 'false' + """ + data = self.get_user_timeline( + screen_name=screen_name, count=limit, include_rts=include_rts + ) + for item in data: + self.handler.handle(item) + + +class Twitter: + """ + Wrapper class with restricted functionality and fewer options. 
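+
+    A minimal usage sketch, assuming Twitter API credentials have already been
+    stored where `credsfromfile()` can locate them; the keywords and limit are
+    illustrative only::
+
+        tw = Twitter()
+        tw.tweets(keywords="nltk", stream=False, limit=5)  # search past Tweets
+        tw.tweets(keywords="nltk", limit=5)                # sample the live stream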
+ """ + + def __init__(self): + self._oauth = credsfromfile() + self.streamer = Streamer(**self._oauth) + self.query = Query(**self._oauth) + + def tweets( + self, + keywords="", + follow="", + to_screen=True, + stream=True, + limit=100, + date_limit=None, + lang="en", + repeat=False, + gzip_compress=False, + ): + """ + Process some Tweets in a simple manner. + + :param str keywords: Keywords to use for searching or filtering + :param list follow: UserIDs to use for filtering Tweets from the public stream + :param bool to_screen: If `True`, display the tweet texts on the screen,\ + otherwise print to a file + + :param bool stream: If `True`, use the live public stream,\ + otherwise search past public Tweets + + :param int limit: The number of data items to process in the current\ + round of processing. + + :param tuple date_limit: The date at which to stop collecting\ + new data. This should be entered as a tuple which can serve as the\ + argument to `datetime.datetime`.\ + E.g. `date_limit=(2015, 4, 1, 12, 40)` for 12:30 pm on April 1 2015. + Note that, in the case of streaming, this is the maximum date, i.e.\ + a date in the future; if not, it is the minimum date, i.e. a date\ + in the past + + :param str lang: language + + :param bool repeat: A flag to determine whether multiple files should\ + be written. If `True`, the length of each file will be set by the\ + value of `limit`. Use only if `to_screen` is `False`. See also + :py:func:`handle`. + + :param gzip_compress: if `True`, output files are compressed with gzip. + """ + if stream: + upper_date_limit = date_limit + lower_date_limit = None + else: + upper_date_limit = None + lower_date_limit = date_limit + + if to_screen: + handler = TweetViewer( + limit=limit, + upper_date_limit=upper_date_limit, + lower_date_limit=lower_date_limit, + ) + else: + handler = TweetWriter( + limit=limit, + upper_date_limit=upper_date_limit, + lower_date_limit=lower_date_limit, + repeat=repeat, + gzip_compress=gzip_compress, + ) + + if to_screen: + handler = TweetViewer(limit=limit) + else: + if stream: + upper_date_limit = date_limit + lower_date_limit = None + else: + upper_date_limit = None + lower_date_limit = date_limit + + handler = TweetWriter( + limit=limit, + upper_date_limit=upper_date_limit, + lower_date_limit=lower_date_limit, + repeat=repeat, + gzip_compress=gzip_compress, + ) + + if stream: + self.streamer.register(handler) + if keywords == "" and follow == "": + self.streamer.sample() + else: + self.streamer.filter(track=keywords, follow=follow, lang=lang) + else: + self.query.register(handler) + if keywords == "": + raise ValueError("Please supply at least one keyword to search for.") + else: + self.query._search_tweets(keywords, limit=limit, lang=lang) + + +class TweetViewer(TweetHandlerI): + """ + Handle data by sending it to the terminal. + """ + + def handle(self, data): + """ + Direct data to `sys.stdout` + + :return: return ``False`` if processing should cease, otherwise return ``True``. + :rtype: bool + :param data: Tweet object returned by Twitter API + """ + text = data["text"] + print(text) + + self.check_date_limit(data) + if self.do_stop: + return + + def on_finish(self): + print(f"Written {self.counter} Tweets") + + +class TweetWriter(TweetHandlerI): + """ + Handle data by writing it to a file. 
+ """ + + def __init__( + self, + limit=2000, + upper_date_limit=None, + lower_date_limit=None, + fprefix="tweets", + subdir="twitter-files", + repeat=False, + gzip_compress=False, + ): + """ + The difference between the upper and lower date limits depends on + whether Tweets are coming in an ascending date order (i.e. when + streaming) or descending date order (i.e. when searching past Tweets). + + :param int limit: number of data items to process in the current\ + round of processing. + + :param tuple upper_date_limit: The date at which to stop collecting new\ + data. This should be entered as a tuple which can serve as the\ + argument to `datetime.datetime`. E.g. `upper_date_limit=(2015, 4, 1, 12,\ + 40)` for 12:30 pm on April 1 2015. + + :param tuple lower_date_limit: The date at which to stop collecting new\ + data. See `upper_data_limit` for formatting. + + :param str fprefix: The prefix to use in creating file names for Tweet\ + collections. + + :param str subdir: The name of the directory where Tweet collection\ + files should be stored. + + :param bool repeat: flag to determine whether multiple files should be\ + written. If `True`, the length of each file will be set by the value\ + of `limit`. See also :py:func:`handle`. + + :param gzip_compress: if `True`, output files are compressed with gzip. + """ + self.fprefix = fprefix + self.subdir = guess_path(subdir) + self.gzip_compress = gzip_compress + self.fname = self.timestamped_file() + self.repeat = repeat + self.output = None + TweetHandlerI.__init__(self, limit, upper_date_limit, lower_date_limit) + + def timestamped_file(self): + """ + :return: timestamped file name + :rtype: str + """ + subdir = self.subdir + fprefix = self.fprefix + if subdir: + if not os.path.exists(subdir): + os.mkdir(subdir) + + fname = os.path.join(subdir, fprefix) + fmt = "%Y%m%d-%H%M%S" + timestamp = datetime.datetime.now().strftime(fmt) + if self.gzip_compress: + suffix = ".gz" + else: + suffix = "" + outfile = f"{fname}.{timestamp}.json{suffix}" + return outfile + + def handle(self, data): + """ + Write Twitter data as line-delimited JSON into one or more files. + + :return: return `False` if processing should cease, otherwise return `True`. + :param data: tweet object returned by Twitter API + """ + if self.startingup: + if self.gzip_compress: + self.output = gzip.open(self.fname, "w") + else: + self.output = open(self.fname, "w") + print(f"Writing to {self.fname}") + + json_data = json.dumps(data) + if self.gzip_compress: + self.output.write((json_data + "\n").encode("utf-8")) + else: + self.output.write(json_data + "\n") + + self.check_date_limit(data) + if self.do_stop: + return + + self.startingup = False + + def on_finish(self): + print(f"Written {self.counter} Tweets") + if self.output: + self.output.close() + + def do_continue(self): + if self.repeat == False: + return TweetHandlerI.do_continue(self) + + if self.do_stop: + # stop for a functional cause (e.g. 
diff --git a/venv/lib/python3.10/site-packages/nltk/twitter/util.py b/venv/lib/python3.10/site-packages/nltk/twitter/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..adfa08853867280da85642778c3e9fb89a532574
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nltk/twitter/util.py
@@ -0,0 +1,147 @@
+# Natural Language Toolkit: Twitter client
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Ewan Klein
+#         Lorenzo Rubio
+# URL:
+# For license information, see LICENSE.TXT
+
+"""
+Authentication utilities to accompany `twitterclient`.
+"""
+
+import os
+import pprint
+
+from twython import Twython
+
+
+def credsfromfile(creds_file=None, subdir=None, verbose=False):
+    """
+    Convenience function for authentication
+    """
+    return Authenticate().load_creds(
+        creds_file=creds_file, subdir=subdir, verbose=verbose
+    )
+
+
+class Authenticate:
+    """
+    Methods for authenticating with Twitter.
+    """
+
+    def __init__(self):
+        self.creds_file = "credentials.txt"
+        self.creds_fullpath = None
+
+        self.oauth = {}
+        try:
+            self.twitter_dir = os.environ["TWITTER"]
+            self.creds_subdir = self.twitter_dir
+        except KeyError:
+            self.twitter_dir = None
+            self.creds_subdir = None
+
+    def load_creds(self, creds_file=None, subdir=None, verbose=False):
+        """
+        Read OAuth credentials from a text file.
+
+        File format for OAuth 1::
+
+            app_key=YOUR_APP_KEY
+            app_secret=YOUR_APP_SECRET
+            oauth_token=OAUTH_TOKEN
+            oauth_token_secret=OAUTH_TOKEN_SECRET
+
+        File format for OAuth 2::
+
+            app_key=YOUR_APP_KEY
+            app_secret=YOUR_APP_SECRET
+            access_token=ACCESS_TOKEN
+
+        :param str creds_file: File containing credentials. ``None`` (default) reads
+            data from `TWITTER/'credentials.txt'`
+        """
+        if creds_file is not None:
+            self.creds_file = creds_file
+
+        if subdir is None:
+            if self.creds_subdir is None:
+                msg = (
+                    "Supply a value to the 'subdir' parameter or"
+                    + " set the TWITTER environment variable."
+                )
+                raise ValueError(msg)
+        else:
+            self.creds_subdir = subdir
+
+        self.creds_fullpath = os.path.normpath(
+            os.path.join(self.creds_subdir, self.creds_file)
+        )
+
+        if not os.path.isfile(self.creds_fullpath):
+            raise OSError(f"Cannot find file {self.creds_fullpath}")
+
+        with open(self.creds_fullpath) as infile:
+            if verbose:
+                print(f"Reading credentials file {self.creds_fullpath}")
+
+            for line in infile:
+                if "=" in line:
+                    name, value = line.split("=", 1)
+                    self.oauth[name.strip()] = value.strip()
+
+        self._validate_creds_file(verbose=verbose)
+
+        return self.oauth
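`load_creds` treats each `name=value` line of the credentials file as one OAuth field, splitting on the first `=` and stripping whitespace. A minimal sketch of that parsing step against an in-memory file with placeholder values (no real credentials involved):

import io

sample = io.StringIO(
    "app_key=YOUR_APP_KEY\n"
    "app_secret=YOUR_APP_SECRET\n"
    "oauth_token=OAUTH_TOKEN\n"
    "oauth_token_secret=OAUTH_TOKEN_SECRET\n"
)

oauth = {}
for line in sample:
    if "=" in line:
        name, value = line.split("=", 1)
        oauth[name.strip()] = value.strip()

assert set(oauth) == {"app_key", "app_secret", "oauth_token", "oauth_token_secret"}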
+    def _validate_creds_file(self, verbose=False):
+        """Check validity of a credentials file."""
+        oauth1 = False
+        oauth1_keys = ["app_key", "app_secret", "oauth_token", "oauth_token_secret"]
+        oauth2 = False
+        oauth2_keys = ["app_key", "app_secret", "access_token"]
+        if all(k in self.oauth for k in oauth1_keys):
+            oauth1 = True
+        elif all(k in self.oauth for k in oauth2_keys):
+            oauth2 = True
+
+        if not (oauth1 or oauth2):
+            msg = f"Missing or incorrect entries in {self.creds_file}\n"
+            msg += pprint.pformat(self.oauth)
+            raise ValueError(msg)
+        elif verbose:
+            print(f'Credentials file "{self.creds_file}" looks good')
+
+
+def add_access_token(creds_file=None):
+    """
+    For OAuth 2, retrieve an access token for an app and append it to a
+    credentials file.
+    """
+    if creds_file is None:
+        path = os.path.dirname(__file__)
+        creds_file = os.path.join(path, "credentials2.txt")
+    oauth2 = credsfromfile(creds_file=creds_file)
+    app_key = oauth2["app_key"]
+    app_secret = oauth2["app_secret"]
+
+    twitter = Twython(app_key, app_secret, oauth_version=2)
+    access_token = twitter.obtain_access_token()
+    tok = f"access_token={access_token}\n"
+    with open(creds_file, "a") as outfile:
+        print(tok, file=outfile)
+
+
+def guess_path(pth):
+    """
+    If the path is not absolute, guess that it is a subdirectory of the
+    user's home directory.
+
+    :param str pth: The pathname of the directory where files of tweets should be written
+    """
+    if os.path.isabs(pth):
+        return pth
+    else:
+        return os.path.expanduser(os.path.join("~", pth))
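To round off, a short sketch of the two checks performed by the helpers above: `_validate_creds_file` accepts either the four OAuth 1 keys or the three OAuth 2 keys, and `guess_path` leaves absolute paths alone while anchoring relative ones at the user's home directory. All values below are placeholders:

import os

OAUTH1_KEYS = ["app_key", "app_secret", "oauth_token", "oauth_token_secret"]
OAUTH2_KEYS = ["app_key", "app_secret", "access_token"]

creds = {"app_key": "KEY", "app_secret": "SECRET", "access_token": "TOKEN"}
is_oauth1 = all(k in creds for k in OAUTH1_KEYS)
is_oauth2 = all(k in creds for k in OAUTH2_KEYS)
assert (is_oauth1, is_oauth2) == (False, True)  # a valid OAuth 2 credential set

# guess_path behaviour: a relative directory name ends up under $HOME.
resolved = os.path.expanduser(os.path.join("~", "twitter-files"))
assert os.path.isabs(resolved)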