Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/__pycache__/europarl_raw.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/__pycache__/util.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/aligned.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/api.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bcp47.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bnc.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chasen.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/childes.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chunked.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/cmudict.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/comparative_sents.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/conll.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/crubadan.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/dependency.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/framenet.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ieer.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/indian.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ipipan.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/knbc.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/markdown.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/mte.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nkjp.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nombank.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nps_chat.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/opinion_lexicon.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_swadesh.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pros_cons.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/reviews.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/semcor.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/senseval.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sentiwordnet.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sinica_treebank.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/string_category.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/switchboard.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/tagged.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/timit.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/twitter.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/udhr.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/util.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/verbnet.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordlist.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordnet.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/xmldocs.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ycoe.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/categorized_sents.py +168 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/cmudict.py +88 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/crubadan.py +106 -0
- env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/framenet.py +0 -0
env-llmeval/lib/python3.10/site-packages/nltk/corpus/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (10.3 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/__pycache__/europarl_raw.cpython-310.pyc
ADDED
Binary file (1.19 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/__pycache__/util.cpython-310.pyc
ADDED
Binary file (4.53 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (5.72 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/aligned.cpython-310.pyc
ADDED
Binary file (4.25 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/api.cpython-310.pyc
ADDED
Binary file (18.8 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bcp47.cpython-310.pyc
ADDED
Binary file (6.91 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bnc.cpython-310.pyc
ADDED
Binary file (9.31 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chasen.cpython-310.pyc
ADDED
Binary file (5 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/childes.cpython-310.pyc
ADDED
Binary file (18.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chunked.cpython-310.pyc
ADDED
Binary file (8.76 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/cmudict.cpython-310.pyc
ADDED
Binary file (3.78 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/comparative_sents.cpython-310.pyc
ADDED
Binary file (10 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/conll.cpython-310.pyc
ADDED
Binary file (16.8 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/crubadan.cpython-310.pyc
ADDED
Binary file (3.97 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/dependency.cpython-310.pyc
ADDED
Binary file (4.15 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/framenet.cpython-310.pyc
ADDED
Binary file (105 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ieer.cpython-310.pyc
ADDED
Binary file (3.84 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/indian.cpython-310.pyc
ADDED
Binary file (3.63 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ipipan.cpython-310.pyc
ADDED
Binary file (11.2 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/knbc.cpython-310.pyc
ADDED
Binary file (6.02 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/markdown.cpython-310.pyc
ADDED
Binary file (14.9 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/mte.cpython-310.pyc
ADDED
Binary file (15.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nkjp.cpython-310.pyc
ADDED
Binary file (14.9 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nombank.cpython-310.pyc
ADDED
Binary file (13.8 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nps_chat.cpython-310.pyc
ADDED
Binary file (3.97 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/opinion_lexicon.cpython-310.pyc
ADDED
Binary file (4.48 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_swadesh.cpython-310.pyc
ADDED
Binary file (4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pros_cons.cpython-310.pyc
ADDED
Binary file (4.68 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/reviews.cpython-310.pyc
ADDED
Binary file (12 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/semcor.cpython-310.pyc
ADDED
Binary file (9.03 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/senseval.cpython-310.pyc
ADDED
Binary file (5.39 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sentiwordnet.cpython-310.pyc
ADDED
Binary file (5.24 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sinica_treebank.cpython-310.pyc
ADDED
Binary file (3.08 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/string_category.cpython-310.pyc
ADDED
Binary file (1.94 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/switchboard.cpython-310.pyc
ADDED
Binary file (5.97 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/tagged.cpython-310.pyc
ADDED
Binary file (10.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/timit.cpython-310.pyc
ADDED
Binary file (16.6 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/twitter.cpython-310.pyc
ADDED
Binary file (4.66 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/udhr.cpython-310.pyc
ADDED
Binary file (2.09 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/util.cpython-310.pyc
ADDED
Binary file (23.5 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/verbnet.cpython-310.pyc
ADDED
Binary file (21.3 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordlist.cpython-310.pyc
ADDED
Binary file (6.21 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordnet.cpython-310.pyc
ADDED
Binary file (72.5 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/xmldocs.cpython-310.pyc
ADDED
Binary file (11 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ycoe.cpython-310.pyc
ADDED
Binary file (10.9 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/categorized_sents.py
ADDED
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit: Categorized Sentences Corpus Reader
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Pierpaolo Pantone <[email protected]>
|
5 |
+
# URL: <https://www.nltk.org/>
|
6 |
+
# For license information, see LICENSE.TXT
|
7 |
+
|
8 |
+
"""
|
9 |
+
CorpusReader structured for corpora that contain one instance on each row.
|
10 |
+
This CorpusReader is specifically used for the Subjectivity Dataset and the
|
11 |
+
Sentence Polarity Dataset.
|
12 |
+
|
13 |
+
- Subjectivity Dataset information -
|
14 |
+
|
15 |
+
Authors: Bo Pang and Lillian Lee.
|
16 |
+
Url: https://www.cs.cornell.edu/people/pabo/movie-review-data
|
17 |
+
|
18 |
+
Distributed with permission.
|
19 |
+
|
20 |
+
Related papers:
|
21 |
+
|
22 |
+
- Bo Pang and Lillian Lee. "A Sentimental Education: Sentiment Analysis Using
|
23 |
+
Subjectivity Summarization Based on Minimum Cuts". Proceedings of the ACL,
|
24 |
+
2004.
|
25 |
+
|
26 |
+
- Sentence Polarity Dataset information -
|
27 |
+
|
28 |
+
Authors: Bo Pang and Lillian Lee.
|
29 |
+
Url: https://www.cs.cornell.edu/people/pabo/movie-review-data
|
30 |
+
|
31 |
+
Related papers:
|
32 |
+
|
33 |
+
- Bo Pang and Lillian Lee. "Seeing stars: Exploiting class relationships for
|
34 |
+
sentiment categorization with respect to rating scales". Proceedings of the
|
35 |
+
ACL, 2005.
|
36 |
+
"""
|
37 |
+
|
38 |
+
from nltk.corpus.reader.api import *
|
39 |
+
from nltk.tokenize import *
|
40 |
+
|
41 |
+
|
42 |
+
class CategorizedSentencesCorpusReader(CategorizedCorpusReader, CorpusReader):
    """
    A reader for corpora in which each row represents a single instance, mainly
    a sentence. Instances are divided into categories based on their file
    identifiers (see CategorizedCorpusReader).
    Since many corpora allow rows that contain more than one sentence, it is
    possible to specify a sentence tokenizer to retrieve all sentences instead
    of all rows.

    Examples using the Subjectivity Dataset:

    >>> from nltk.corpus import subjectivity
    >>> subjectivity.sents()[23] # doctest: +NORMALIZE_WHITESPACE
    ['television', 'made', 'him', 'famous', ',', 'but', 'his', 'biggest', 'hits',
    'happened', 'off', 'screen', '.']
    >>> subjectivity.categories()
    ['obj', 'subj']
    >>> subjectivity.words(categories='subj')
    ['smart', 'and', 'alert', ',', 'thirteen', ...]

    Examples using the Sentence Polarity Dataset:

    >>> from nltk.corpus import sentence_polarity
    >>> sentence_polarity.sents() # doctest: +NORMALIZE_WHITESPACE
    [['simplistic', ',', 'silly', 'and', 'tedious', '.'], ["it's", 'so', 'laddish',
    'and', 'juvenile', ',', 'only', 'teenage', 'boys', 'could', 'possibly', 'find',
    'it', 'funny', '.'], ...]
    >>> sentence_polarity.categories()
    ['neg', 'pos']
    """

    # Class used to produce lazy, stream-backed views over the corpus files.
    CorpusView = StreamBackedCorpusView

    def __init__(
        self,
        root,
        fileids,
        word_tokenizer=WhitespaceTokenizer(),
        sent_tokenizer=None,
        encoding="utf8",
        **kwargs
    ):
        """
        :param root: The root directory for the corpus.
        :param fileids: a list or regexp specifying the fileids in the corpus.
        :param word_tokenizer: a tokenizer for breaking sentences or paragraphs
            into words. Default: `WhitespaceTokenizer`
        :param sent_tokenizer: a tokenizer for breaking paragraphs into sentences.
        :param encoding: the encoding that should be used to read the corpus.
        :param kwargs: additional parameters passed to CategorizedCorpusReader.
        """

        CorpusReader.__init__(self, root, fileids, encoding)
        CategorizedCorpusReader.__init__(self, kwargs)
        self._word_tokenizer = word_tokenizer
        self._sent_tokenizer = sent_tokenizer

    def sents(self, fileids=None, categories=None):
        """
        Return all sentences in the corpus or in the specified file(s).

        :param fileids: a list or regexp specifying the ids of the files whose
            sentences have to be returned.
        :param categories: a list specifying the categories whose sentences have
            to be returned.
        :return: the given file(s) as a list of sentences.
            Each sentence is tokenized using the specified word_tokenizer.
        :rtype: list(list(str))
        """
        fileids = self._resolve(fileids, categories)
        if fileids is None:
            fileids = self._fileids
        elif isinstance(fileids, str):
            fileids = [fileids]
        return concat(
            [
                self.CorpusView(path, self._read_sent_block, encoding=enc)
                for (path, enc, fileid) in self.abspaths(fileids, True, True)
            ]
        )

    def words(self, fileids=None, categories=None):
        """
        Return all words and punctuation symbols in the corpus or in the specified
        file(s).

        :param fileids: a list or regexp specifying the ids of the files whose
            words have to be returned.
        :param categories: a list specifying the categories whose words have to
            be returned.
        :return: the given file(s) as a list of words and punctuation symbols.
        :rtype: list(str)
        """
        fileids = self._resolve(fileids, categories)
        if fileids is None:
            fileids = self._fileids
        elif isinstance(fileids, str):
            fileids = [fileids]
        return concat(
            [
                self.CorpusView(path, self._read_word_block, encoding=enc)
                for (path, enc, fileid) in self.abspaths(fileids, True, True)
            ]
        )

    def _read_sent_block(self, stream):
        """Read up to 20 rows from *stream* and return them as tokenized
        sentences (one list of words per sentence)."""
        sents = []
        for i in range(20):  # Read 20 lines at a time.
            line = stream.readline()
            if not line:
                # End of file: stop immediately instead of spinning through
                # the remaining iterations on empty readline() results.
                break
            if self._sent_tokenizer:
                sents.extend(
                    [
                        self._word_tokenizer.tokenize(sent)
                        for sent in self._sent_tokenizer.tokenize(line)
                    ]
                )
            else:
                sents.append(self._word_tokenizer.tokenize(line))
        return sents

    def _read_word_block(self, stream):
        """Read a block of rows from *stream* and return a flat list of
        their word tokens."""
        words = []
        for sent in self._read_sent_block(stream):
            words.extend(sent)
        return words
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/cmudict.py
ADDED
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit: Carnegie Mellon Pronouncing Dictionary Corpus Reader
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Steven Bird <[email protected]>
|
5 |
+
# URL: <https://www.nltk.org/>
|
6 |
+
# For license information, see LICENSE.TXT
|
7 |
+
|
8 |
+
"""
|
9 |
+
The Carnegie Mellon Pronouncing Dictionary [cmudict.0.6]
|
10 |
+
ftp://ftp.cs.cmu.edu/project/speech/dict/
|
11 |
+
Copyright 1998 Carnegie Mellon University
|
12 |
+
|
13 |
+
File Format: Each line consists of an uppercased word, a counter
|
14 |
+
(for alternative pronunciations), and a transcription. Vowels are
|
15 |
+
marked for stress (1=primary, 2=secondary, 0=no stress). E.g.:
|
16 |
+
NATURAL 1 N AE1 CH ER0 AH0 L
|
17 |
+
|
18 |
+
The dictionary contains 127069 entries. Of these, 119400 words are assigned
|
19 |
+
a unique pronunciation, 6830 words have two pronunciations, and 839 words have
|
20 |
+
three or more pronunciations. Many of these are fast-speech variants.
|
21 |
+
|
22 |
+
Phonemes: There are 39 phonemes, as shown below:
|
23 |
+
|
24 |
+
Phoneme Example Translation Phoneme Example Translation
|
25 |
+
------- ------- ----------- ------- ------- -----------
|
26 |
+
AA odd AA D AE at AE T
|
27 |
+
AH hut HH AH T AO ought AO T
|
28 |
+
AW cow K AW AY hide HH AY D
|
29 |
+
B be B IY CH cheese CH IY Z
|
30 |
+
D dee D IY DH thee DH IY
|
31 |
+
EH Ed EH D ER hurt HH ER T
|
32 |
+
EY ate EY T F fee F IY
|
33 |
+
G green G R IY N HH he HH IY
|
34 |
+
IH it IH T IY eat IY T
|
35 |
+
JH gee JH IY K key K IY
|
36 |
+
L lee L IY M me M IY
|
37 |
+
N knee N IY NG ping P IH NG
|
38 |
+
OW oat OW T OY toy T OY
|
39 |
+
P pee P IY R read R IY D
|
40 |
+
S sea S IY SH she SH IY
|
41 |
+
T tea T IY TH theta TH EY T AH
|
42 |
+
UH hood HH UH D UW two T UW
|
43 |
+
V vee V IY W we W IY
|
44 |
+
Y yield Y IY L D Z zee Z IY
|
45 |
+
ZH seizure S IY ZH ER
|
46 |
+
"""
|
47 |
+
|
48 |
+
from nltk.corpus.reader.api import *
|
49 |
+
from nltk.corpus.reader.util import *
|
50 |
+
from nltk.util import Index
|
51 |
+
|
52 |
+
|
53 |
+
class CMUDictCorpusReader(CorpusReader):
    """Corpus reader for the Carnegie Mellon Pronouncing Dictionary.

    Entries are (word, transcription) pairs, where the word is lowercased
    and the transcription is a list of ARPAbet phoneme strings.
    """

    def entries(self):
        """
        :return: the cmudict lexicon as a list of entries
            containing (word, transcriptions) tuples.
        """
        views = []
        for fileid, enc in self.abspaths(None, True):
            views.append(
                StreamBackedCorpusView(fileid, read_cmudict_block, encoding=enc)
            )
        return concat(views)

    def words(self):
        """
        :return: a list of all words defined in the cmudict lexicon.
        """
        return [entry[0].lower() for entry in self.entries()]

    def dict(self):
        """
        :return: the cmudict lexicon as a dictionary, whose keys are
            lowercase words and whose values are lists of pronunciations.
        """
        return dict(Index(self.entries()))
|
78 |
+
|
79 |
+
|
80 |
+
def read_cmudict_block(stream):
    """Read up to 100 pronunciation entries from *stream*.

    Each line has the form ``WORD COUNTER PHONEME...``; the word is
    lowercased, the counter is dropped, and the phonemes become a list.

    :return: a list of (word, phoneme_list) tuples; shorter than 100
        entries only when the end of the stream is reached.
    """
    block = []
    for _ in range(100):  # Read 100 entries at a time.
        raw = stream.readline()
        if not raw:  # End of file.
            break
        fields = raw.split()
        # fields[1] is the alternative-pronunciation counter; skip it.
        block.append((fields[0].lower(), fields[2:]))
    return block
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/crubadan.py
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit: An Crubadan N-grams Reader
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Avital Pekker <[email protected]>
|
5 |
+
#
|
6 |
+
# URL: <https://www.nltk.org/>
|
7 |
+
# For license information, see LICENSE.TXT
|
8 |
+
|
9 |
+
"""
|
10 |
+
An NLTK interface for the n-gram statistics gathered from
|
11 |
+
the corpora for each language using An Crubadan.
|
12 |
+
|
13 |
+
There are multiple potential applications for the data but
|
14 |
+
this reader was created with the goal of using it in the
|
15 |
+
context of language identification.
|
16 |
+
|
17 |
+
For details about An Crubadan, this data, and its potential uses, see:
|
18 |
+
http://borel.slu.edu/crubadan/index.html
|
19 |
+
"""
|
20 |
+
|
21 |
+
import re
|
22 |
+
from os import path
|
23 |
+
|
24 |
+
from nltk.corpus.reader import CorpusReader
|
25 |
+
from nltk.data import ZipFilePathPointer
|
26 |
+
from nltk.probability import FreqDist
|
27 |
+
|
28 |
+
|
29 |
+
class CrubadanCorpusReader(CorpusReader):
    """
    A corpus reader used to access language An Crubadan n-gram files.
    """

    # Filename of the table mapping Crubadan codes to ISO 639-3 codes.
    _LANG_MAPPER_FILE = "table.txt"

    def __init__(self, root, fileids, encoding="utf8", tagset=None):
        """
        :param root: The root directory for the corpus.
        :param fileids: a list or regexp specifying the fileids in the corpus.
        :param encoding: the encoding that should be used to read the corpus.
        :param tagset: accepted for API compatibility; unused by this reader.
        """
        # Pass the caller's encoding through instead of hard-coding "utf8".
        super().__init__(root, fileids, encoding=encoding)
        # Per-instance cache of language code -> FreqDist.  (Previously a
        # class attribute, which shared the cache across every instance.)
        self._all_lang_freq = {}
        # Rows of table.txt: [crubadan_code, iso_639_3_code, ...].
        self._lang_mapping_data = []
        self._load_lang_mapping_data()

    def lang_freq(self, lang):
        """Return n-gram FreqDist for a specific language
        given ISO 639-3 language code"""

        # Lazily load and cache the FreqDist the first time it is requested.
        if lang not in self._all_lang_freq:
            self._all_lang_freq[lang] = self._load_lang_ngrams(lang)

        return self._all_lang_freq[lang]

    def langs(self):
        """Return a list of supported languages as ISO 639-3 codes"""
        return [row[1] for row in self._lang_mapping_data]

    def iso_to_crubadan(self, lang):
        """Return internal Crubadan code based on ISO 639-3 code
        (or None if the language is unknown)."""
        for row in self._lang_mapping_data:
            if row[1].lower() == lang.lower():
                return row[0]

    def crubadan_to_iso(self, lang):
        """Return ISO 639-3 code given internal Crubadan code
        (or None if the code is unknown)."""
        for row in self._lang_mapping_data:
            if row[0].lower() == lang.lower():
                return row[1]

    def _load_lang_mapping_data(self):
        """Load language mappings between codes and description from table.txt

        :raises RuntimeError: if the corpus is zipped (must be installed
            unpacked) or the mapper file is missing.
        """
        if isinstance(self.root, ZipFilePathPointer):
            raise RuntimeError(
                "Please install the 'crubadan' corpus first, use nltk.download()"
            )

        mapper_file = path.join(self.root, self._LANG_MAPPER_FILE)
        if self._LANG_MAPPER_FILE not in self.fileids():
            raise RuntimeError("Could not find language mapper file: " + mapper_file)

        with open(mapper_file, encoding="utf-8") as raw:
            strip_raw = raw.read().strip()

        self._lang_mapping_data = [row.split("\t") for row in strip_raw.split("\n")]

    def _load_lang_ngrams(self, lang):
        """Load single n-gram language file given the ISO 639-3 language code
        and return its FreqDist

        :raises RuntimeError: if the language is unsupported or its n-gram
            file is missing.
        """

        if lang not in self.langs():
            raise RuntimeError("Unsupported language.")

        crubadan_code = self.iso_to_crubadan(lang)
        ngram_file = path.join(self.root, crubadan_code + "-3grams.txt")

        if not path.isfile(ngram_file):
            raise RuntimeError("No N-gram file found for requested language.")

        counts = FreqDist()
        with open(ngram_file, encoding="utf-8") as f:
            for line in f:
                # Each line is "<frequency> <ngram>".
                data = line.split(" ")

                ngram = data[1].strip("\n")
                freq = int(data[0])

                counts[ngram] = freq

        return counts
|
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/framenet.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|