Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.
- ckpts/universal/global_step40/zero/21.attention.dense.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/6.input_layernorm.weight/fp32.pt +3 -0
- ckpts/universal/global_step40/zero/9.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/9.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/nltk/inference/__init__.py +24 -0
- venv/lib/python3.10/site-packages/nltk/inference/__pycache__/api.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/nltk/inference/__pycache__/discourse.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/nltk/inference/__pycache__/mace.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/nltk/inference/__pycache__/resolution.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/nltk/inference/__pycache__/tableau.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/nltk/inference/discourse.py +651 -0
- venv/lib/python3.10/site-packages/nltk/inference/mace.py +383 -0
- venv/lib/python3.10/site-packages/nltk/inference/prover9.py +508 -0
- venv/lib/python3.10/site-packages/nltk/inference/tableau.py +712 -0
- venv/lib/python3.10/site-packages/nltk/test/__init__.py +18 -0
- venv/lib/python3.10/site-packages/nltk/test/all.py +25 -0
- venv/lib/python3.10/site-packages/nltk/test/bleu.doctest +29 -0
- venv/lib/python3.10/site-packages/nltk/test/bnc.doctest +60 -0
- venv/lib/python3.10/site-packages/nltk/test/ccg.doctest +376 -0
- venv/lib/python3.10/site-packages/nltk/test/ccg_semantics.doctest +552 -0
- venv/lib/python3.10/site-packages/nltk/test/chat80.doctest +232 -0
- venv/lib/python3.10/site-packages/nltk/test/childes.doctest +190 -0
- venv/lib/python3.10/site-packages/nltk/test/childes_fixt.py +13 -0
- venv/lib/python3.10/site-packages/nltk/test/chunk.doctest +372 -0
- venv/lib/python3.10/site-packages/nltk/test/classify.doctest +202 -0
- venv/lib/python3.10/site-packages/nltk/test/classify_fixt.py +5 -0
- venv/lib/python3.10/site-packages/nltk/test/collections.doctest +31 -0
- venv/lib/python3.10/site-packages/nltk/test/collocations.doctest +307 -0
- venv/lib/python3.10/site-packages/nltk/test/concordance.doctest +75 -0
- venv/lib/python3.10/site-packages/nltk/test/conftest.py +33 -0
- venv/lib/python3.10/site-packages/nltk/test/corpus.doctest +0 -0
- venv/lib/python3.10/site-packages/nltk/test/crubadan.doctest +65 -0
- venv/lib/python3.10/site-packages/nltk/test/data.doctest +387 -0
- venv/lib/python3.10/site-packages/nltk/test/dependency.doctest +241 -0
- venv/lib/python3.10/site-packages/nltk/test/discourse.doctest +552 -0
- venv/lib/python3.10/site-packages/nltk/test/drt.doctest +515 -0
- venv/lib/python3.10/site-packages/nltk/test/featgram.doctest +610 -0
- venv/lib/python3.10/site-packages/nltk/test/featstruct.doctest +1229 -0
- venv/lib/python3.10/site-packages/nltk/test/framenet.doctest +288 -0
- venv/lib/python3.10/site-packages/nltk/test/generate.doctest +78 -0
- venv/lib/python3.10/site-packages/nltk/test/gensim.doctest +141 -0
- venv/lib/python3.10/site-packages/nltk/test/gensim_fixt.py +4 -0
- venv/lib/python3.10/site-packages/nltk/test/gluesemantics.doctest +383 -0
- venv/lib/python3.10/site-packages/nltk/test/gluesemantics_malt.doctest +69 -0
- venv/lib/python3.10/site-packages/nltk/test/gluesemantics_malt_fixt.py +9 -0
- venv/lib/python3.10/site-packages/nltk/test/grammar.doctest +69 -0
- venv/lib/python3.10/site-packages/nltk/test/grammartestsuites.doctest +109 -0
- venv/lib/python3.10/site-packages/nltk/test/index.doctest +100 -0
- venv/lib/python3.10/site-packages/nltk/test/inference.doctest +536 -0
- venv/lib/python3.10/site-packages/nltk/test/internals.doctest +161 -0
ckpts/universal/global_step40/zero/21.attention.dense.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f09dc5e3d1f2fe00de5384020d14c0ee5fc3fa0bab27d7faefaa38bd2363308
+size 16778396
ckpts/universal/global_step40/zero/6.input_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b074d879d2851f59ad55ac4cab24e0ed9d2d60b6f4f5bca4ffb03065da84fd47
+size 9293
ckpts/universal/global_step40/zero/9.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a41a58055098f1e089395656f9f2e10d72480b0ea255e5b8678079a7ff642450
+size 33555627
ckpts/universal/global_step40/zero/9.mlp.dense_h_to_4h.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05242d2dd26391d8ddc050af5a04fdbe12fd8abb28b40782e791565f7d7f37bd
+size 33555533
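
Note: the four checkpoint tensors above are stored through Git LFS, so each diff adds only a three-line pointer file (spec version, SHA-256 object ID, byte size) instead of the tensor payload. A minimal sketch of reading such a pointer, assuming the key/value layout shown in these hunks (the helper name is illustrative, not part of this repo):

    # Parse a Git LFS pointer file into its key/value fields.
    def parse_lfs_pointer(text: str) -> dict:
        fields = {}
        for line in text.strip().splitlines():
            key, _, value = line.partition(" ")
            fields[key] = value
        # Sanity check: a pointer always begins with the LFS spec version.
        if not fields.get("version", "").startswith("https://git-lfs.github.com/spec/"):
            raise ValueError("not a Git LFS pointer")
        return fields

    pointer = (
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:9f09dc5e3d1f2fe00de5384020d14c0ee5fc3fa0bab27d7faefaa38bd2363308\n"
        "size 16778396\n"
    )
    info = parse_lfs_pointer(pointer)
    print(info["oid"], int(info["size"]))  # object ID and payload size in bytes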
venv/lib/python3.10/site-packages/nltk/inference/__init__.py
ADDED
@@ -0,0 +1,24 @@
+# Natural Language Toolkit: Inference
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Dan Garrette <[email protected]>
+#         Ewan Klein <[email protected]>
+#
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+"""
+Classes and interfaces for theorem proving and model building.
+"""
+
+from nltk.inference.api import ParallelProverBuilder, ParallelProverBuilderCommand
+from nltk.inference.discourse import (
+    CfgReadingCommand,
+    DiscourseTester,
+    DrtGlueReadingCommand,
+    ReadingCommand,
+)
+from nltk.inference.mace import Mace, MaceCommand
+from nltk.inference.prover9 import Prover9, Prover9Command
+from nltk.inference.resolution import ResolutionProver, ResolutionProverCommand
+from nltk.inference.tableau import TableauProver, TableauProverCommand
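
This `__init__` re-exports the prover and model-builder entry points, so client code can import them directly from `nltk.inference`. A minimal sketch of the classic syllogism, using the pure-Python `TableauProver` so no external binary is required (assumes only that `nltk` is installed):

    from nltk.sem import Expression
    from nltk.inference import TableauProver

    read_expr = Expression.fromstring
    p1 = read_expr("man(socrates)")
    p2 = read_expr("all x.(man(x) -> mortal(x))")
    goal = read_expr("mortal(socrates)")

    # TableauProver is implemented in pure Python (see tableau.py below),
    # unlike Prover9/Mace4, which shell out to external binaries.
    print(TableauProver().prove(goal, [p1, p2]))  # expected: True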
venv/lib/python3.10/site-packages/nltk/inference/__pycache__/api.cpython-310.pyc
ADDED
Binary file (19.1 kB)
venv/lib/python3.10/site-packages/nltk/inference/__pycache__/discourse.cpython-310.pyc
ADDED
Binary file (20.8 kB)
venv/lib/python3.10/site-packages/nltk/inference/__pycache__/mace.cpython-310.pyc
ADDED
Binary file (10.6 kB)
venv/lib/python3.10/site-packages/nltk/inference/__pycache__/resolution.cpython-310.pyc
ADDED
Binary file (21.4 kB)
venv/lib/python3.10/site-packages/nltk/inference/__pycache__/tableau.cpython-310.pyc
ADDED
Binary file (18.2 kB)
venv/lib/python3.10/site-packages/nltk/inference/discourse.py
ADDED
@@ -0,0 +1,651 @@
+# Natural Language Toolkit: Discourse Processing
+#
+# Author: Ewan Klein <[email protected]>
+#         Dan Garrette <[email protected]>
+#
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+r"""
+Module for incrementally developing simple discourses, and checking for semantic ambiguity,
+consistency and informativeness.
+
+Many of the ideas are based on the CURT family of programs of Blackburn and Bos
+(see http://homepages.inf.ed.ac.uk/jbos/comsem/book1.html).
+
+Consistency checking is carried out by using the ``mace`` module to call the Mace4 model builder.
+Informativeness checking is carried out with a call to ``Prover.prove()`` from
+the ``inference`` module.
+
+``DiscourseTester`` is a constructor for discourses.
+The basic data structure is a list of sentences, stored as ``self._sentences``. Each sentence in the list
+is assigned a "sentence ID" (``sid``) of the form ``s``\ *i*. For example::
+
+    s0: A boxer walks
+    s1: Every boxer chases a girl
+
+Each sentence can be ambiguous between a number of readings, each of which receives a
+"reading ID" (``rid``) of the form ``s``\ *i* -``r``\ *j*. For example::
+
+    s0 readings:
+
+    s0-r1: some x.(boxer(x) & walk(x))
+    s0-r0: some x.(boxerdog(x) & walk(x))
+
+A "thread" is a list of readings, represented as a list of ``rid``\ s.
+Each thread receives a "thread ID" (``tid``) of the form ``d``\ *i*.
+For example::
+
+    d0: ['s0-r0', 's1-r0']
+
+The set of all threads for a discourse is the Cartesian product of all the readings of the sequences of sentences.
+(This is not intended to scale beyond very short discourses!) The method ``readings(filter=True)`` will only show
+those threads which are consistent (taking into account any background assumptions).
+"""
+
+import os
+from abc import ABCMeta, abstractmethod
+from functools import reduce
+from operator import add, and_
+
+from nltk.data import show_cfg
+from nltk.inference.mace import MaceCommand
+from nltk.inference.prover9 import Prover9Command
+from nltk.parse import load_parser
+from nltk.parse.malt import MaltParser
+from nltk.sem.drt import AnaphoraResolutionException, resolve_anaphora
+from nltk.sem.glue import DrtGlue
+from nltk.sem.logic import Expression
+from nltk.tag import RegexpTagger
+
+
+class ReadingCommand(metaclass=ABCMeta):
+    @abstractmethod
+    def parse_to_readings(self, sentence):
+        """
+        :param sentence: the sentence to read
+        :type sentence: str
+        """
+
+    def process_thread(self, sentence_readings):
+        """
+        This method should be used to handle dependencies between readings such
+        as resolving anaphora.
+
+        :param sentence_readings: readings to process
+        :type sentence_readings: list(Expression)
+        :return: the list of readings after processing
+        :rtype: list(Expression)
+        """
+        return sentence_readings
+
+    @abstractmethod
+    def combine_readings(self, readings):
+        """
+        :param readings: readings to combine
+        :type readings: list(Expression)
+        :return: one combined reading
+        :rtype: Expression
+        """
+
+    @abstractmethod
+    def to_fol(self, expression):
+        """
+        Convert this expression into a First-Order Logic expression.
+
+        :param expression: an expression
+        :type expression: Expression
+        :return: a FOL version of the input expression
+        :rtype: Expression
+        """
+
+
+class CfgReadingCommand(ReadingCommand):
+    def __init__(self, gramfile=None):
+        """
+        :param gramfile: name of file where grammar can be loaded
+        :type gramfile: str
+        """
+        self._gramfile = (
+            gramfile if gramfile else "grammars/book_grammars/discourse.fcfg"
+        )
+        self._parser = load_parser(self._gramfile)
+
+    def parse_to_readings(self, sentence):
+        """:see: ReadingCommand.parse_to_readings()"""
+        from nltk.sem import root_semrep
+
+        tokens = sentence.split()
+        trees = self._parser.parse(tokens)
+        return [root_semrep(tree) for tree in trees]
+
+    def combine_readings(self, readings):
+        """:see: ReadingCommand.combine_readings()"""
+        return reduce(and_, readings)
+
+    def to_fol(self, expression):
+        """:see: ReadingCommand.to_fol()"""
+        return expression
+
+
+class DrtGlueReadingCommand(ReadingCommand):
+    def __init__(self, semtype_file=None, remove_duplicates=False, depparser=None):
+        """
+        :param semtype_file: name of file where grammar can be loaded
+        :param remove_duplicates: should duplicates be removed?
+        :param depparser: the dependency parser
+        """
+        if semtype_file is None:
+            semtype_file = os.path.join(
+                "grammars", "sample_grammars", "drt_glue.semtype"
+            )
+        self._glue = DrtGlue(
+            semtype_file=semtype_file,
+            remove_duplicates=remove_duplicates,
+            depparser=depparser,
+        )
+
+    def parse_to_readings(self, sentence):
+        """:see: ReadingCommand.parse_to_readings()"""
+        return self._glue.parse_to_meaning(sentence)
+
+    def process_thread(self, sentence_readings):
+        """:see: ReadingCommand.process_thread()"""
+        try:
+            return [self.combine_readings(sentence_readings)]
+        except AnaphoraResolutionException:
+            return []
+
+    def combine_readings(self, readings):
+        """:see: ReadingCommand.combine_readings()"""
+        thread_reading = reduce(add, readings)
+        return resolve_anaphora(thread_reading.simplify())
+
+    def to_fol(self, expression):
+        """:see: ReadingCommand.to_fol()"""
+        return expression.fol()
+
+
+class DiscourseTester:
+    """
+    Check properties of an ongoing discourse.
+    """
+
+    def __init__(self, input, reading_command=None, background=None):
+        """
+        Initialize a ``DiscourseTester``.
+
+        :param input: the discourse sentences
+        :type input: list of str
+        :param background: Formulas which express background assumptions
+        :type background: list(Expression)
+        """
+        self._input = input
+        self._sentences = {"s%s" % i: sent for i, sent in enumerate(input)}
+        self._models = None
+        self._readings = {}
+        self._reading_command = (
+            reading_command if reading_command else CfgReadingCommand()
+        )
+        self._threads = {}
+        self._filtered_threads = {}
+        if background is not None:
+            from nltk.sem.logic import Expression
+
+            for e in background:
+                assert isinstance(e, Expression)
+            self._background = background
+        else:
+            self._background = []
+
+    ###############################
+    # Sentences
+    ###############################
+
+    def sentences(self):
+        """
+        Display the list of sentences in the current discourse.
+        """
+        for id in sorted(self._sentences):
+            print(f"{id}: {self._sentences[id]}")
+
+    def add_sentence(self, sentence, informchk=False, consistchk=False):
+        """
+        Add a sentence to the current discourse.
+
+        Updates ``self._input`` and ``self._sentences``.
+        :param sentence: An input sentence
+        :type sentence: str
+        :param informchk: if ``True``, check that the result of adding the sentence is thread-informative. Updates ``self._readings``.
+        :param consistchk: if ``True``, check that the result of adding the sentence is thread-consistent. Updates ``self._readings``.
+
+        """
+        # check whether the new sentence is informative (i.e. not entailed by the previous discourse)
+        if informchk:
+            self.readings(verbose=False)
+            for tid in sorted(self._threads):
+                assumptions = [reading for (rid, reading) in self.expand_threads(tid)]
+                assumptions += self._background
+                for sent_reading in self._get_readings(sentence):
+                    tp = Prover9Command(goal=sent_reading, assumptions=assumptions)
+                    if tp.prove():
+                        print(
+                            "Sentence '%s' under reading '%s':"
+                            % (sentence, str(sent_reading))
+                        )
+                        print("Not informative relative to thread '%s'" % tid)
+
+        self._input.append(sentence)
+        self._sentences = {"s%s" % i: sent for i, sent in enumerate(self._input)}
+        # check whether adding the new sentence to the discourse preserves consistency (i.e. a model can be found for the combined set of
+        # of assumptions
+        if consistchk:
+            self.readings(verbose=False)
+            self.models(show=False)
+
+    def retract_sentence(self, sentence, verbose=True):
+        """
+        Remove a sentence from the current discourse.
+
+        Updates ``self._input``, ``self._sentences`` and ``self._readings``.
+        :param sentence: An input sentence
+        :type sentence: str
+        :param verbose: If ``True``, report on the updated list of sentences.
+        """
+        try:
+            self._input.remove(sentence)
+        except ValueError:
+            print(
+                "Retraction failed. The sentence '%s' is not part of the current discourse:"
+                % sentence
+            )
+            self.sentences()
+            return None
+        self._sentences = {"s%s" % i: sent for i, sent in enumerate(self._input)}
+        self.readings(verbose=False)
+        if verbose:
+            print("Current sentences are ")
+            self.sentences()
+
+    def grammar(self):
+        """
+        Print out the grammar in use for parsing input sentences
+        """
+        show_cfg(self._reading_command._gramfile)
+
+    ###############################
+    # Readings and Threads
+    ###############################
+
+    def _get_readings(self, sentence):
+        """
+        Build a list of semantic readings for a sentence.
+
+        :rtype: list(Expression)
+        """
+        return self._reading_command.parse_to_readings(sentence)
+
+    def _construct_readings(self):
+        """
+        Use ``self._sentences`` to construct a value for ``self._readings``.
+        """
+        # re-initialize self._readings in case we have retracted a sentence
+        self._readings = {}
+        for sid in sorted(self._sentences):
+            sentence = self._sentences[sid]
+            readings = self._get_readings(sentence)
+            self._readings[sid] = {
+                f"{sid}-r{rid}": reading.simplify()
+                for rid, reading in enumerate(sorted(readings, key=str))
+            }
+
+    def _construct_threads(self):
+        """
+        Use ``self._readings`` to construct a value for ``self._threads``
+        and use the model builder to construct a value for ``self._filtered_threads``
+        """
+        thread_list = [[]]
+        for sid in sorted(self._readings):
+            thread_list = self.multiply(thread_list, sorted(self._readings[sid]))
+        self._threads = {"d%s" % tid: thread for tid, thread in enumerate(thread_list)}
+        # re-initialize the filtered threads
+        self._filtered_threads = {}
+        # keep the same ids, but only include threads which get models
+        consistency_checked = self._check_consistency(self._threads)
+        for (tid, thread) in self._threads.items():
+            if (tid, True) in consistency_checked:
+                self._filtered_threads[tid] = thread
+
+    def _show_readings(self, sentence=None):
+        """
+        Print out the readings for the discourse (or a single sentence).
+        """
+        if sentence is not None:
+            print("The sentence '%s' has these readings:" % sentence)
+            for r in [str(reading) for reading in (self._get_readings(sentence))]:
+                print("    %s" % r)
+        else:
+            for sid in sorted(self._readings):
+                print()
+                print("%s readings:" % sid)
+                print()  #'-' * 30
+                for rid in sorted(self._readings[sid]):
+                    lf = self._readings[sid][rid]
+                    print(f"{rid}: {lf.normalize()}")
+
+    def _show_threads(self, filter=False, show_thread_readings=False):
+        """
+        Print out the value of ``self._threads`` or ``self._filtered_hreads``
+        """
+        threads = self._filtered_threads if filter else self._threads
+        for tid in sorted(threads):
+            if show_thread_readings:
+                readings = [
+                    self._readings[rid.split("-")[0]][rid] for rid in self._threads[tid]
+                ]
+                try:
+                    thread_reading = (
+                        ": %s"
+                        % self._reading_command.combine_readings(readings).normalize()
+                    )
+                except Exception as e:
+                    thread_reading = ": INVALID: %s" % e.__class__.__name__
+            else:
+                thread_reading = ""
+
+            print("%s:" % tid, self._threads[tid], thread_reading)
+
+    def readings(
+        self,
+        sentence=None,
+        threaded=False,
+        verbose=True,
+        filter=False,
+        show_thread_readings=False,
+    ):
+        """
+        Construct and show the readings of the discourse (or of a single sentence).
+
+        :param sentence: test just this sentence
+        :type sentence: str
+        :param threaded: if ``True``, print out each thread ID and the corresponding thread.
+        :param filter: if ``True``, only print out consistent thread IDs and threads.
+        """
+        self._construct_readings()
+        self._construct_threads()
+
+        # if we are filtering or showing thread readings, show threads
+        if filter or show_thread_readings:
+            threaded = True
+
+        if verbose:
+            if not threaded:
+                self._show_readings(sentence=sentence)
+            else:
+                self._show_threads(
+                    filter=filter, show_thread_readings=show_thread_readings
+                )
+
+    def expand_threads(self, thread_id, threads=None):
+        """
+        Given a thread ID, find the list of ``logic.Expression`` objects corresponding to the reading IDs in that thread.
+
+        :param thread_id: thread ID
+        :type thread_id: str
+        :param threads: a mapping from thread IDs to lists of reading IDs
+        :type threads: dict
+        :return: A list of pairs ``(rid, reading)`` where reading is the ``logic.Expression`` associated with a reading ID
+        :rtype: list of tuple
+        """
+        if threads is None:
+            threads = self._threads
+        return [
+            (rid, self._readings[sid][rid])
+            for rid in threads[thread_id]
+            for sid in rid.split("-")[:1]
+        ]
+
+    ###############################
+    # Models and Background
+    ###############################
+
+    def _check_consistency(self, threads, show=False, verbose=False):
+        results = []
+        for tid in sorted(threads):
+            assumptions = [
+                reading for (rid, reading) in self.expand_threads(tid, threads=threads)
+            ]
+            assumptions = list(
+                map(
+                    self._reading_command.to_fol,
+                    self._reading_command.process_thread(assumptions),
+                )
+            )
+            if assumptions:
+                assumptions += self._background
+                # if Mace4 finds a model, it always seems to find it quickly
+                mb = MaceCommand(None, assumptions, max_models=20)
+                modelfound = mb.build_model()
+            else:
+                modelfound = False
+            results.append((tid, modelfound))
+            if show:
+                spacer(80)
+                print("Model for Discourse Thread %s" % tid)
+                spacer(80)
+                if verbose:
+                    for a in assumptions:
+                        print(a)
+                    spacer(80)
+                if modelfound:
+                    print(mb.model(format="cooked"))
+                else:
+                    print("No model found!\n")
+        return results
+
+    def models(self, thread_id=None, show=True, verbose=False):
+        """
+        Call Mace4 to build a model for each current discourse thread.
+
+        :param thread_id: thread ID
+        :type thread_id: str
+        :param show: If ``True``, display the model that has been found.
+        """
+        self._construct_readings()
+        self._construct_threads()
+        threads = {thread_id: self._threads[thread_id]} if thread_id else self._threads
+
+        for (tid, modelfound) in self._check_consistency(
+            threads, show=show, verbose=verbose
+        ):
+            idlist = [rid for rid in threads[tid]]
+
+            if not modelfound:
+                print(f"Inconsistent discourse: {tid} {idlist}:")
+                for rid, reading in self.expand_threads(tid):
+                    print(f"    {rid}: {reading.normalize()}")
+                print()
+            else:
+                print(f"Consistent discourse: {tid} {idlist}:")
+                for rid, reading in self.expand_threads(tid):
+                    print(f"    {rid}: {reading.normalize()}")
+                print()
+
+    def add_background(self, background, verbose=False):
+        """
+        Add a list of background assumptions for reasoning about the discourse.
+
+        When called, this method also updates the discourse model's set of readings and threads.
+        :param background: Formulas which contain background information
+        :type background: list(Expression)
+        """
+        from nltk.sem.logic import Expression
+
+        for (count, e) in enumerate(background):
+            assert isinstance(e, Expression)
+            if verbose:
+                print("Adding assumption %s to background" % count)
+            self._background.append(e)
+
+        # update the state
+        self._construct_readings()
+        self._construct_threads()
+
+    def background(self):
+        """
+        Show the current background assumptions.
+        """
+        for e in self._background:
+            print(str(e))
+
+    ###############################
+    # Misc
+    ###############################
+
+    @staticmethod
+    def multiply(discourse, readings):
+        """
+        Multiply every thread in ``discourse`` by every reading in ``readings``.
+
+        Given discourse = [['A'], ['B']], readings = ['a', 'b', 'c'] , returns
+        [['A', 'a'], ['A', 'b'], ['A', 'c'], ['B', 'a'], ['B', 'b'], ['B', 'c']]
+
+        :param discourse: the current list of readings
+        :type discourse: list of lists
+        :param readings: an additional list of readings
+        :type readings: list(Expression)
+        :rtype: A list of lists
+        """
+        result = []
+        for sublist in discourse:
+            for r in readings:
+                new = []
+                new += sublist
+                new.append(r)
+                result.append(new)
+        return result
+
+
+def load_fol(s):
+    """
+    Temporarily duplicated from ``nltk.sem.util``.
+    Convert a file of first order formulas into a list of ``Expression`` objects.
+
+    :param s: the contents of the file
+    :type s: str
+    :return: a list of parsed formulas.
+    :rtype: list(Expression)
+    """
+    statements = []
+    for linenum, line in enumerate(s.splitlines()):
+        line = line.strip()
+        if line.startswith("#") or line == "":
+            continue
+        try:
+            statements.append(Expression.fromstring(line))
+        except Exception as e:
+            raise ValueError(f"Unable to parse line {linenum}: {line}") from e
+    return statements
+
+
+###############################
+# Demo
+###############################
+def discourse_demo(reading_command=None):
+    """
+    Illustrate the various methods of ``DiscourseTester``
+    """
+    dt = DiscourseTester(
+        ["A boxer walks", "Every boxer chases a girl"], reading_command
+    )
+    dt.models()
+    print()
+    # dt.grammar()
+    print()
+    dt.sentences()
+    print()
+    dt.readings()
+    print()
+    dt.readings(threaded=True)
+    print()
+    dt.models("d1")
+    dt.add_sentence("John is a boxer")
+    print()
+    dt.sentences()
+    print()
+    dt.readings(threaded=True)
+    print()
+    dt = DiscourseTester(
+        ["A student dances", "Every student is a person"], reading_command
+    )
+    print()
+    dt.add_sentence("No person dances", consistchk=True)
+    print()
+    dt.readings()
+    print()
+    dt.retract_sentence("No person dances", verbose=True)
+    print()
+    dt.models()
+    print()
+    dt.readings("A person dances")
+    print()
+    dt.add_sentence("A person dances", informchk=True)
+    dt = DiscourseTester(
+        ["Vincent is a boxer", "Fido is a boxer", "Vincent is married", "Fido barks"],
+        reading_command,
+    )
+    dt.readings(filter=True)
+    import nltk.data
+
+    background_file = os.path.join("grammars", "book_grammars", "background.fol")
+    background = nltk.data.load(background_file)
+
+    print()
+    dt.add_background(background, verbose=False)
+    dt.background()
+    print()
+    dt.readings(filter=True)
+    print()
+    dt.models()
+
+
+def drt_discourse_demo(reading_command=None):
+    """
+    Illustrate the various methods of ``DiscourseTester``
+    """
+    dt = DiscourseTester(["every dog chases a boy", "he runs"], reading_command)
+    dt.models()
+    print()
+    dt.sentences()
+    print()
+    dt.readings()
+    print()
+    dt.readings(show_thread_readings=True)
+    print()
+    dt.readings(filter=True, show_thread_readings=True)
+
+
+def spacer(num=30):
+    print("-" * num)
+
+
+def demo():
+    discourse_demo()
+
+    tagger = RegexpTagger(
+        [
+            ("^(chases|runs)$", "VB"),
+            ("^(a)$", "ex_quant"),
+            ("^(every)$", "univ_quant"),
+            ("^(dog|boy)$", "NN"),
+            ("^(he)$", "PRP"),
+        ]
+    )
+    depparser = MaltParser(tagger=tagger)
+    drt_discourse_demo(
+        DrtGlueReadingCommand(remove_duplicates=False, depparser=depparser)
+    )
+
+
+if __name__ == "__main__":
+    demo()
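
The `discourse_demo()` function at the end of the file documents the intended workflow. A condensed sketch, assuming the NLTK `book_grammars` data package and the external Prover9/Mace4 binaries are available (the default `CfgReadingCommand` loads `grammars/book_grammars/discourse.fcfg`):

    from nltk.inference.discourse import DiscourseTester

    # Requires nltk.download("book_grammars") and Prover9/Mace4 on the search path.
    dt = DiscourseTester(["A student dances", "Every student is a person"])
    dt.readings()                                         # enumerate s0/s1 readings
    dt.add_sentence("No person dances", consistchk=True)  # Mace4 consistency check
    dt.retract_sentence("No person dances")
    dt.add_sentence("A person dances", informchk=True)    # Prover9 informativeness check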
venv/lib/python3.10/site-packages/nltk/inference/mace.py
ADDED
@@ -0,0 +1,383 @@
+# Natural Language Toolkit: Interface to the Mace4 Model Builder
+#
+# Author: Dan Garrette <[email protected]>
+#         Ewan Klein <[email protected]>
+
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+"""
+A model builder that makes use of the external 'Mace4' package.
+"""
+
+import os
+import tempfile
+
+from nltk.inference.api import BaseModelBuilderCommand, ModelBuilder
+from nltk.inference.prover9 import Prover9CommandParent, Prover9Parent
+from nltk.sem import Expression, Valuation
+from nltk.sem.logic import is_indvar
+
+
+class MaceCommand(Prover9CommandParent, BaseModelBuilderCommand):
+    """
+    A ``MaceCommand`` specific to the ``Mace`` model builder. It contains
+    a print_assumptions() method that is used to print the list
+    of assumptions in multiple formats.
+    """
+
+    _interpformat_bin = None
+
+    def __init__(self, goal=None, assumptions=None, max_models=500, model_builder=None):
+        """
+        :param goal: Input expression to prove
+        :type goal: sem.Expression
+        :param assumptions: Input expressions to use as assumptions in
+            the proof.
+        :type assumptions: list(sem.Expression)
+        :param max_models: The maximum number of models that Mace will try before
+            simply returning false. (Use 0 for no maximum.)
+        :type max_models: int
+        """
+        if model_builder is not None:
+            assert isinstance(model_builder, Mace)
+        else:
+            model_builder = Mace(max_models)
+
+        BaseModelBuilderCommand.__init__(self, model_builder, goal, assumptions)
+
+    @property
+    def valuation(mbc):
+        return mbc.model("valuation")
+
+    def _convert2val(self, valuation_str):
+        """
+        Transform the output file into an NLTK-style Valuation.
+
+        :return: A model if one is generated; None otherwise.
+        :rtype: sem.Valuation
+        """
+        valuation_standard_format = self._transform_output(valuation_str, "standard")
+
+        val = []
+        for line in valuation_standard_format.splitlines(False):
+            l = line.strip()
+
+            if l.startswith("interpretation"):
+                # find the number of entities in the model
+                num_entities = int(l[l.index("(") + 1 : l.index(",")].strip())
+
+            elif l.startswith("function") and l.find("_") == -1:
+                # replace the integer identifier with a corresponding alphabetic character
+                name = l[l.index("(") + 1 : l.index(",")].strip()
+                if is_indvar(name):
+                    name = name.upper()
+                value = int(l[l.index("[") + 1 : l.index("]")].strip())
+                val.append((name, MaceCommand._make_model_var(value)))
+
+            elif l.startswith("relation"):
+                l = l[l.index("(") + 1 :]
+                if "(" in l:
+                    # relation is not nullary
+                    name = l[: l.index("(")].strip()
+                    values = [
+                        int(v.strip())
+                        for v in l[l.index("[") + 1 : l.index("]")].split(",")
+                    ]
+                    val.append(
+                        (name, MaceCommand._make_relation_set(num_entities, values))
+                    )
+                else:
+                    # relation is nullary
+                    name = l[: l.index(",")].strip()
+                    value = int(l[l.index("[") + 1 : l.index("]")].strip())
+                    val.append((name, value == 1))
+
+        return Valuation(val)
+
+    @staticmethod
+    def _make_relation_set(num_entities, values):
+        """
+        Convert a Mace4-style relation table into a dictionary.
+
+        :param num_entities: the number of entities in the model; determines the row length in the table.
+        :type num_entities: int
+        :param values: a list of 1's and 0's that represent whether a relation holds in a Mace4 model.
+        :type values: list of int
+        """
+        r = set()
+        for position in [pos for (pos, v) in enumerate(values) if v == 1]:
+            r.add(
+                tuple(MaceCommand._make_relation_tuple(position, values, num_entities))
+            )
+        return r
+
+    @staticmethod
+    def _make_relation_tuple(position, values, num_entities):
+        if len(values) == 1:
+            return []
+        else:
+            sublist_size = len(values) // num_entities
+            sublist_start = position // sublist_size
+            sublist_position = int(position % sublist_size)
+
+            sublist = values[
+                sublist_start * sublist_size : (sublist_start + 1) * sublist_size
+            ]
+            return [
+                MaceCommand._make_model_var(sublist_start)
+            ] + MaceCommand._make_relation_tuple(
+                sublist_position, sublist, num_entities
+            )
+
+    @staticmethod
+    def _make_model_var(value):
+        """
+        Pick an alphabetic character as identifier for an entity in the model.
+
+        :param value: where to index into the list of characters
+        :type value: int
+        """
+        letter = [
+            "a",
+            "b",
+            "c",
+            "d",
+            "e",
+            "f",
+            "g",
+            "h",
+            "i",
+            "j",
+            "k",
+            "l",
+            "m",
+            "n",
+            "o",
+            "p",
+            "q",
+            "r",
+            "s",
+            "t",
+            "u",
+            "v",
+            "w",
+            "x",
+            "y",
+            "z",
+        ][value]
+        num = value // 26
+        return letter + str(num) if num > 0 else letter
+
+    def _decorate_model(self, valuation_str, format):
+        """
+        Print out a Mace4 model using any Mace4 ``interpformat`` format.
+        See https://www.cs.unm.edu/~mccune/mace4/manual/ for details.
+
+        :param valuation_str: str with the model builder's output
+        :param format: str indicating the format for displaying
+            models. Defaults to 'standard' format.
+        :return: str
+        """
+        if not format:
+            return valuation_str
+        elif format == "valuation":
+            return self._convert2val(valuation_str)
+        else:
+            return self._transform_output(valuation_str, format)
+
+    def _transform_output(self, valuation_str, format):
+        """
+        Transform the output file into any Mace4 ``interpformat`` format.
+
+        :param format: Output format for displaying models.
+        :type format: str
+        """
+        if format in [
+            "standard",
+            "standard2",
+            "portable",
+            "tabular",
+            "raw",
+            "cooked",
+            "xml",
+            "tex",
+        ]:
+            return self._call_interpformat(valuation_str, [format])[0]
+        else:
+            raise LookupError("The specified format does not exist")
+
+    def _call_interpformat(self, input_str, args=[], verbose=False):
+        """
+        Call the ``interpformat`` binary with the given input.
+
+        :param input_str: A string whose contents are used as stdin.
+        :param args: A list of command-line arguments.
+        :return: A tuple (stdout, returncode)
+        :see: ``config_prover9``
+        """
+        if self._interpformat_bin is None:
+            self._interpformat_bin = self._modelbuilder._find_binary(
+                "interpformat", verbose
+            )
+
+        return self._modelbuilder._call(
+            input_str, self._interpformat_bin, args, verbose
+        )
+
+
+class Mace(Prover9Parent, ModelBuilder):
+    _mace4_bin = None
+
+    def __init__(self, end_size=500):
+        self._end_size = end_size
+        """The maximum model size that Mace will try before
+           simply returning false. (Use -1 for no maximum.)"""
+
+    def _build_model(self, goal=None, assumptions=None, verbose=False):
+        """
+        Use Mace4 to build a first order model.
+
+        :return: ``True`` if a model was found (i.e. Mace returns value of 0),
+            else ``False``
+        """
+        if not assumptions:
+            assumptions = []
+
+        stdout, returncode = self._call_mace4(
+            self.prover9_input(goal, assumptions), verbose=verbose
+        )
+        return (returncode == 0, stdout)
+
+    def _call_mace4(self, input_str, args=[], verbose=False):
+        """
+        Call the ``mace4`` binary with the given input.
+
+        :param input_str: A string whose contents are used as stdin.
+        :param args: A list of command-line arguments.
+        :return: A tuple (stdout, returncode)
+        :see: ``config_prover9``
+        """
+        if self._mace4_bin is None:
+            self._mace4_bin = self._find_binary("mace4", verbose)
+
+        updated_input_str = ""
+        if self._end_size > 0:
+            updated_input_str += "assign(end_size, %d).\n\n" % self._end_size
+        updated_input_str += input_str
+
+        return self._call(updated_input_str, self._mace4_bin, args, verbose)
+
+
+def spacer(num=30):
+    print("-" * num)
+
+
+def decode_result(found):
+    """
+    Decode the result of model_found()
+
+    :param found: The output of model_found()
+    :type found: bool
+    """
+    return {True: "Countermodel found", False: "No countermodel found", None: "None"}[
+        found
+    ]
+
+
+def test_model_found(arguments):
+    """
+    Try some proofs and exhibit the results.
+    """
+    for (goal, assumptions) in arguments:
+        g = Expression.fromstring(goal)
+        alist = [lp.parse(a) for a in assumptions]
+        m = MaceCommand(g, assumptions=alist, max_models=50)
+        found = m.build_model()
+        for a in alist:
+            print("   %s" % a)
+        print(f"|- {g}: {decode_result(found)}\n")
+
+
+def test_build_model(arguments):
+    """
+    Try to build a ``nltk.sem.Valuation``.
+    """
+    g = Expression.fromstring("all x.man(x)")
+    alist = [
+        Expression.fromstring(a)
+        for a in [
+            "man(John)",
+            "man(Socrates)",
+            "man(Bill)",
+            "some x.(-(x = John) & man(x) & sees(John,x))",
+            "some x.(-(x = Bill) & man(x))",
+            "all x.some y.(man(x) -> gives(Socrates,x,y))",
+        ]
+    ]
+
+    m = MaceCommand(g, assumptions=alist)
+    m.build_model()
+    spacer()
+    print("Assumptions and Goal")
+    spacer()
+    for a in alist:
+        print("   %s" % a)
+    print(f"|- {g}: {decode_result(m.build_model())}\n")
+    spacer()
+    # print(m.model('standard'))
+    # print(m.model('cooked'))
+    print("Valuation")
+    spacer()
+    print(m.valuation, "\n")
+
+
+def test_transform_output(argument_pair):
+    """
+    Transform the model into various Mace4 ``interpformat`` formats.
+    """
+    g = Expression.fromstring(argument_pair[0])
+    alist = [lp.parse(a) for a in argument_pair[1]]
+    m = MaceCommand(g, assumptions=alist)
+    m.build_model()
+    for a in alist:
+        print("   %s" % a)
+    print(f"|- {g}: {m.build_model()}\n")
+    for format in ["standard", "portable", "xml", "cooked"]:
+        spacer()
+        print("Using '%s' format" % format)
+        spacer()
+        print(m.model(format=format))
+
+
+def test_make_relation_set():
+    print(
+        MaceCommand._make_relation_set(num_entities=3, values=[1, 0, 1])
+        == {("c",), ("a",)}
+    )
+    print(
+        MaceCommand._make_relation_set(
+            num_entities=3, values=[0, 0, 0, 0, 0, 0, 1, 0, 0]
+        )
+        == {("c", "a")}
+    )
+    print(
+        MaceCommand._make_relation_set(num_entities=2, values=[0, 0, 1, 0, 0, 0, 1, 0])
+        == {("a", "b", "a"), ("b", "b", "a")}
+    )
+
+
+arguments = [
+    ("mortal(Socrates)", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]),
+    ("(not mortal(Socrates))", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]),
+]
+
+
+def demo():
+    test_model_found(arguments)
+    test_build_model(arguments)
+    test_transform_output(arguments[1])
+
+
+if __name__ == "__main__":
+    demo()
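
`test_build_model()` above shows the intended use of `MaceCommand`: hand it a goal and assumptions, let `mace4` search for a countermodel, and read the result back as an `nltk.sem.Valuation`. A condensed sketch, assuming the `mace4` and `interpformat` binaries from the LADR distribution are on the search path:

    from nltk.sem import Expression
    from nltk.inference.mace import MaceCommand

    read_expr = Expression.fromstring
    goal = read_expr("all x.man(x)")
    assumptions = [read_expr("man(John)"), read_expr("man(Socrates)")]

    mc = MaceCommand(goal, assumptions=assumptions, max_models=50)
    if mc.build_model():      # True iff mace4 found a (counter)model
        print(mc.valuation)   # model rendered as an nltk.sem.Valuation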
venv/lib/python3.10/site-packages/nltk/inference/prover9.py
ADDED
@@ -0,0 +1,508 @@
+# Natural Language Toolkit: Interface to the Prover9 Theorem Prover
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Dan Garrette <[email protected]>
+#         Ewan Klein <[email protected]>
+#
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+"""
+A theorem prover that makes use of the external 'Prover9' package.
+"""
+
+import os
+import subprocess
+
+import nltk
+from nltk.inference.api import BaseProverCommand, Prover
+from nltk.sem.logic import (
+    AllExpression,
+    AndExpression,
+    EqualityExpression,
+    ExistsExpression,
+    Expression,
+    IffExpression,
+    ImpExpression,
+    NegatedExpression,
+    OrExpression,
+)
+
+#
+# Following is not yet used. Return code for 2 actually realized as 512.
+#
+p9_return_codes = {
+    0: True,
+    1: "(FATAL)",  # A fatal error occurred (user's syntax error).
+    2: False,  # (SOS_EMPTY) Prover9 ran out of things to do
+    #   (sos list exhausted).
+    3: "(MAX_MEGS)",  # The max_megs (memory limit) parameter was exceeded.
+    4: "(MAX_SECONDS)",  # The max_seconds parameter was exceeded.
+    5: "(MAX_GIVEN)",  # The max_given parameter was exceeded.
+    6: "(MAX_KEPT)",  # The max_kept parameter was exceeded.
+    7: "(ACTION)",  # A Prover9 action terminated the search.
+    101: "(SIGSEGV)",  # Prover9 crashed, most probably due to a bug.
+}
+
+
+class Prover9CommandParent:
+    """
+    A common base class used by both ``Prover9Command`` and ``MaceCommand``,
+    which is responsible for maintaining a goal and a set of assumptions,
+    and generating prover9-style input files from them.
+    """
+
+    def print_assumptions(self, output_format="nltk"):
+        """
+        Print the list of the current assumptions.
+        """
+        if output_format.lower() == "nltk":
+            for a in self.assumptions():
+                print(a)
+        elif output_format.lower() == "prover9":
+            for a in convert_to_prover9(self.assumptions()):
+                print(a)
+        else:
+            raise NameError(
+                "Unrecognized value for 'output_format': %s" % output_format
+            )
+
+
+class Prover9Command(Prover9CommandParent, BaseProverCommand):
+    """
+    A ``ProverCommand`` specific to the ``Prover9`` prover. It contains
+    the a print_assumptions() method that is used to print the list
+    of assumptions in multiple formats.
+    """
+
+    def __init__(self, goal=None, assumptions=None, timeout=60, prover=None):
+        """
+        :param goal: Input expression to prove
+        :type goal: sem.Expression
+        :param assumptions: Input expressions to use as assumptions in
+            the proof.
+        :type assumptions: list(sem.Expression)
+        :param timeout: number of seconds before timeout; set to 0 for
+            no timeout.
+        :type timeout: int
+        :param prover: a prover. If not set, one will be created.
+        :type prover: Prover9
+        """
+        if not assumptions:
+            assumptions = []
+
+        if prover is not None:
+            assert isinstance(prover, Prover9)
+        else:
+            prover = Prover9(timeout)
+
+        BaseProverCommand.__init__(self, prover, goal, assumptions)
+
+    def decorate_proof(self, proof_string, simplify=True):
+        """
+        :see BaseProverCommand.decorate_proof()
+        """
+        if simplify:
+            return self._prover._call_prooftrans(proof_string, ["striplabels"])[
+                0
+            ].rstrip()
+        else:
+            return proof_string.rstrip()
+
+
+class Prover9Parent:
+    """
+    A common class extended by both ``Prover9`` and ``Mace <mace.Mace>``.
+    It contains the functionality required to convert NLTK-style
+    expressions into Prover9-style expressions.
+    """
+
+    _binary_location = None
+
+    def config_prover9(self, binary_location, verbose=False):
+        if binary_location is None:
+            self._binary_location = None
+            self._prover9_bin = None
+        else:
+            name = "prover9"
+            self._prover9_bin = nltk.internals.find_binary(
+                name,
+                path_to_bin=binary_location,
+                env_vars=["PROVER9"],
+                url="https://www.cs.unm.edu/~mccune/prover9/",
+                binary_names=[name, name + ".exe"],
+                verbose=verbose,
+            )
+            self._binary_location = self._prover9_bin.rsplit(os.path.sep, 1)
+
+    def prover9_input(self, goal, assumptions):
+        """
+        :return: The input string that should be provided to the
+            prover9 binary. This string is formed based on the goal,
+            assumptions, and timeout value of this object.
+        """
+        s = ""
+
+        if assumptions:
+            s += "formulas(assumptions).\n"
+            for p9_assumption in convert_to_prover9(assumptions):
+                s += "    %s.\n" % p9_assumption
+            s += "end_of_list.\n\n"
+
+        if goal:
+            s += "formulas(goals).\n"
+            s += "    %s.\n" % convert_to_prover9(goal)
+            s += "end_of_list.\n\n"
+
+        return s
+
+    def binary_locations(self):
+        """
+        A list of directories that should be searched for the prover9
+        executables. This list is used by ``config_prover9`` when searching
+        for the prover9 executables.
+        """
+        return [
+            "/usr/local/bin/prover9",
+            "/usr/local/bin/prover9/bin",
+            "/usr/local/bin",
+            "/usr/bin",
+            "/usr/local/prover9",
+            "/usr/local/share/prover9",
+        ]
+
+    def _find_binary(self, name, verbose=False):
+        binary_locations = self.binary_locations()
+        if self._binary_location is not None:
+            binary_locations += [self._binary_location]
+        return nltk.internals.find_binary(
+            name,
+            searchpath=binary_locations,
+            env_vars=["PROVER9"],
+            url="https://www.cs.unm.edu/~mccune/prover9/",
+            binary_names=[name, name + ".exe"],
+            verbose=verbose,
+        )
+
+    def _call(self, input_str, binary, args=[], verbose=False):
+        """
+        Call the binary with the given input.
+
+        :param input_str: A string whose contents are used as stdin.
+        :param binary: The location of the binary to call
+        :param args: A list of command-line arguments.
+        :return: A tuple (stdout, returncode)
+        :see: ``config_prover9``
+        """
+        if verbose:
+            print("Calling:", binary)
+            print("Args:", args)
+            print("Input:\n", input_str, "\n")
+
+        # Call prover9 via a subprocess
+        cmd = [binary] + args
+        try:
+            input_str = input_str.encode("utf8")
+        except AttributeError:
+            pass
+        p = subprocess.Popen(
+            cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE
+        )
+        (stdout, stderr) = p.communicate(input=input_str)
+
+        if verbose:
+            print("Return code:", p.returncode)
+            if stdout:
+                print("stdout:\n", stdout, "\n")
+            if stderr:
+                print("stderr:\n", stderr, "\n")
+
+        return (stdout.decode("utf-8"), p.returncode)
+
+
+def convert_to_prover9(input):
+    """
+    Convert a ``logic.Expression`` to Prover9 format.
+    """
+    if isinstance(input, list):
+        result = []
+        for s in input:
+            try:
+                result.append(_convert_to_prover9(s.simplify()))
+            except:
+                print("input %s cannot be converted to Prover9 input syntax" % input)
+                raise
+        return result
+    else:
+        try:
+            return _convert_to_prover9(input.simplify())
+        except:
+            print("input %s cannot be converted to Prover9 input syntax" % input)
+            raise
+
+
+def _convert_to_prover9(expression):
+    """
+    Convert ``logic.Expression`` to Prover9 formatted string.
+    """
+    if isinstance(expression, ExistsExpression):
+        return (
+            "exists "
+            + str(expression.variable)
+            + " "
+            + _convert_to_prover9(expression.term)
+        )
+    elif isinstance(expression, AllExpression):
+        return (
+            "all "
+            + str(expression.variable)
+            + " "
+            + _convert_to_prover9(expression.term)
+        )
+    elif isinstance(expression, NegatedExpression):
+        return "-(" + _convert_to_prover9(expression.term) + ")"
+    elif isinstance(expression, AndExpression):
+        return (
+            "("
+            + _convert_to_prover9(expression.first)
+            + " & "
+            + _convert_to_prover9(expression.second)
+            + ")"
+        )
+    elif isinstance(expression, OrExpression):
+        return (
+            "("
+            + _convert_to_prover9(expression.first)
+            + " | "
+            + _convert_to_prover9(expression.second)
+            + ")"
+        )
+    elif isinstance(expression, ImpExpression):
+        return (
+            "("
+            + _convert_to_prover9(expression.first)
+            + " -> "
+            + _convert_to_prover9(expression.second)
+            + ")"
+        )
+    elif isinstance(expression, IffExpression):
+        return (
+            "("
+            + _convert_to_prover9(expression.first)
+            + " <-> "
+            + _convert_to_prover9(expression.second)
+            + ")"
+        )
+    elif isinstance(expression, EqualityExpression):
+        return (
+            "("
+            + _convert_to_prover9(expression.first)
+            + " = "
+            + _convert_to_prover9(expression.second)
+            + ")"
+        )
+    else:
+        return str(expression)
+
+
+class Prover9(Prover9Parent, Prover):
+    _prover9_bin = None
+    _prooftrans_bin = None
+
+    def __init__(self, timeout=60):
+        self._timeout = timeout
+        """The timeout value for prover9. If a proof can not be found
+           in this amount of time, then prover9 will return false.
+           (Use 0 for no timeout.)"""
|
316 |
+
|
317 |
+
def _prove(self, goal=None, assumptions=None, verbose=False):
|
318 |
+
"""
|
319 |
+
Use Prover9 to prove a theorem.
|
320 |
+
:return: A pair whose first element is a boolean indicating if the
|
321 |
+
proof was successful (i.e. returns value of 0) and whose second element
|
322 |
+
is the output of the prover.
|
323 |
+
"""
|
324 |
+
if not assumptions:
|
325 |
+
assumptions = []
|
326 |
+
|
327 |
+
stdout, returncode = self._call_prover9(
|
328 |
+
self.prover9_input(goal, assumptions), verbose=verbose
|
329 |
+
)
|
330 |
+
return (returncode == 0, stdout)
|
331 |
+
|
332 |
+
def prover9_input(self, goal, assumptions):
|
333 |
+
"""
|
334 |
+
:see: Prover9Parent.prover9_input
|
335 |
+
"""
|
336 |
+
s = "clear(auto_denials).\n" # only one proof required
|
337 |
+
return s + Prover9Parent.prover9_input(self, goal, assumptions)
|
338 |
+
|
339 |
+
def _call_prover9(self, input_str, args=[], verbose=False):
|
340 |
+
"""
|
341 |
+
Call the ``prover9`` binary with the given input.
|
342 |
+
|
343 |
+
:param input_str: A string whose contents are used as stdin.
|
344 |
+
:param args: A list of command-line arguments.
|
345 |
+
:return: A tuple (stdout, returncode)
|
346 |
+
:see: ``config_prover9``
|
347 |
+
"""
|
348 |
+
if self._prover9_bin is None:
|
349 |
+
self._prover9_bin = self._find_binary("prover9", verbose)
|
350 |
+
|
351 |
+
updated_input_str = ""
|
352 |
+
if self._timeout > 0:
|
353 |
+
updated_input_str += "assign(max_seconds, %d).\n\n" % self._timeout
|
354 |
+
updated_input_str += input_str
|
355 |
+
|
356 |
+
stdout, returncode = self._call(
|
357 |
+
updated_input_str, self._prover9_bin, args, verbose
|
358 |
+
)
|
359 |
+
|
360 |
+
if returncode not in [0, 2]:
|
361 |
+
errormsgprefix = "%%ERROR:"
|
362 |
+
if errormsgprefix in stdout:
|
363 |
+
msgstart = stdout.index(errormsgprefix)
|
364 |
+
errormsg = stdout[msgstart:].strip()
|
365 |
+
else:
|
366 |
+
errormsg = None
|
367 |
+
if returncode in [3, 4, 5, 6]:
|
368 |
+
raise Prover9LimitExceededException(returncode, errormsg)
|
369 |
+
else:
|
370 |
+
raise Prover9FatalException(returncode, errormsg)
|
371 |
+
|
372 |
+
return stdout, returncode
|
373 |
+
|
374 |
+
def _call_prooftrans(self, input_str, args=[], verbose=False):
|
375 |
+
"""
|
376 |
+
Call the ``prooftrans`` binary with the given input.
|
377 |
+
|
378 |
+
:param input_str: A string whose contents are used as stdin.
|
379 |
+
:param args: A list of command-line arguments.
|
380 |
+
:return: A tuple (stdout, returncode)
|
381 |
+
:see: ``config_prover9``
|
382 |
+
"""
|
383 |
+
if self._prooftrans_bin is None:
|
384 |
+
self._prooftrans_bin = self._find_binary("prooftrans", verbose)
|
385 |
+
|
386 |
+
return self._call(input_str, self._prooftrans_bin, args, verbose)
|
387 |
+
|
388 |
+
|
389 |
+
class Prover9Exception(Exception):
|
390 |
+
def __init__(self, returncode, message):
|
391 |
+
msg = p9_return_codes[returncode]
|
392 |
+
if message:
|
393 |
+
msg += "\n%s" % message
|
394 |
+
Exception.__init__(self, msg)
|
395 |
+
|
396 |
+
|
397 |
+
class Prover9FatalException(Prover9Exception):
|
398 |
+
pass
|
399 |
+
|
400 |
+
|
401 |
+
class Prover9LimitExceededException(Prover9Exception):
|
402 |
+
pass
|
403 |
+
|
404 |
+
|
405 |
+
######################################################################
|
406 |
+
# { Tests and Demos
|
407 |
+
######################################################################
|
408 |
+
|
409 |
+
|
410 |
+
def test_config():
|
411 |
+
|
412 |
+
a = Expression.fromstring("(walk(j) & sing(j))")
|
413 |
+
g = Expression.fromstring("walk(j)")
|
414 |
+
p = Prover9Command(g, assumptions=[a])
|
415 |
+
p._executable_path = None
|
416 |
+
p.prover9_search = []
|
417 |
+
p.prove()
|
418 |
+
# config_prover9('/usr/local/bin')
|
419 |
+
print(p.prove())
|
420 |
+
print(p.proof())
|
421 |
+
|
422 |
+
|
423 |
+
def test_convert_to_prover9(expr):
|
424 |
+
"""
|
425 |
+
Test that parsing works OK.
|
426 |
+
"""
|
427 |
+
for t in expr:
|
428 |
+
e = Expression.fromstring(t)
|
429 |
+
print(convert_to_prover9(e))
|
430 |
+
|
431 |
+
|
432 |
+
def test_prove(arguments):
|
433 |
+
"""
|
434 |
+
Try some proofs and exhibit the results.
|
435 |
+
"""
|
436 |
+
for (goal, assumptions) in arguments:
|
437 |
+
g = Expression.fromstring(goal)
|
438 |
+
alist = [Expression.fromstring(a) for a in assumptions]
|
439 |
+
p = Prover9Command(g, assumptions=alist).prove()
|
440 |
+
for a in alist:
|
441 |
+
print(" %s" % a)
|
442 |
+
print(f"|- {g}: {p}\n")
|
443 |
+
|
444 |
+
|
445 |
+
arguments = [
|
446 |
+
("(man(x) <-> (not (not man(x))))", []),
|
447 |
+
("(not (man(x) & (not man(x))))", []),
|
448 |
+
("(man(x) | (not man(x)))", []),
|
449 |
+
("(man(x) & (not man(x)))", []),
|
450 |
+
("(man(x) -> man(x))", []),
|
451 |
+
("(not (man(x) & (not man(x))))", []),
|
452 |
+
("(man(x) | (not man(x)))", []),
|
453 |
+
("(man(x) -> man(x))", []),
|
454 |
+
("(man(x) <-> man(x))", []),
|
455 |
+
("(not (man(x) <-> (not man(x))))", []),
|
456 |
+
("mortal(Socrates)", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]),
|
457 |
+
("((all x.(man(x) -> walks(x)) & man(Socrates)) -> some y.walks(y))", []),
|
458 |
+
("(all x.man(x) -> all x.man(x))", []),
|
459 |
+
("some x.all y.sees(x,y)", []),
|
460 |
+
(
|
461 |
+
"some e3.(walk(e3) & subj(e3, mary))",
|
462 |
+
[
|
463 |
+
"some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))"
|
464 |
+
],
|
465 |
+
),
|
466 |
+
(
|
467 |
+
"some x e1.(see(e1) & subj(e1, x) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))",
|
468 |
+
[
|
469 |
+
"some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))"
|
470 |
+
],
|
471 |
+
),
|
472 |
+
]
|
473 |
+
|
474 |
+
expressions = [
|
475 |
+
r"some x y.sees(x,y)",
|
476 |
+
r"some x.(man(x) & walks(x))",
|
477 |
+
r"\x.(man(x) & walks(x))",
|
478 |
+
r"\x y.sees(x,y)",
|
479 |
+
r"walks(john)",
|
480 |
+
r"\x.big(x, \y.mouse(y))",
|
481 |
+
r"(walks(x) & (runs(x) & (threes(x) & fours(x))))",
|
482 |
+
r"(walks(x) -> runs(x))",
|
483 |
+
r"some x.(PRO(x) & sees(John, x))",
|
484 |
+
r"some x.(man(x) & (not walks(x)))",
|
485 |
+
r"all x.(man(x) -> walks(x))",
|
486 |
+
]
|
487 |
+
|
488 |
+
|
489 |
+
def spacer(num=45):
|
490 |
+
print("-" * num)
|
491 |
+
|
492 |
+
|
493 |
+
def demo():
|
494 |
+
print("Testing configuration")
|
495 |
+
spacer()
|
496 |
+
test_config()
|
497 |
+
print()
|
498 |
+
print("Testing conversion to Prover9 format")
|
499 |
+
spacer()
|
500 |
+
test_convert_to_prover9(expressions)
|
501 |
+
print()
|
502 |
+
print("Testing proofs")
|
503 |
+
spacer()
|
504 |
+
test_prove(arguments)
|
505 |
+
|
506 |
+
|
507 |
+
if __name__ == "__main__":
|
508 |
+
demo()
|
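For orientation, here is a minimal sketch of the input text that `prover9_input` above assembles. The Socrates formulas are illustrative, not from this file; note that `assign(max_seconds, ...)` is prepended later by `_call_prover9`, not by `prover9_input`, and actually running a proof additionally requires a local Prover9 install (on `$PATH`, via `$PROVER9`, or via `config_prover9`). Building the input string itself needs no binary:

from nltk.sem.logic import Expression
from nltk.inference.prover9 import Prover9

p = Prover9(timeout=30)
goal = Expression.fromstring("mortal(socrates)")
assumptions = [
    Expression.fromstring("all x.(man(x) -> mortal(x))"),
    Expression.fromstring("man(socrates)"),
]
print(p.prover9_input(goal, assumptions))
# Expected shape, reconstructed from prover9_input above (not captured output):
#   clear(auto_denials).
#   formulas(assumptions).
#       all x (man(x) -> mortal(x)).
#       man(socrates).
#   end_of_list.
#
#   formulas(goals).
#       mortal(socrates).
#   end_of_list.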
venv/lib/python3.10/site-packages/nltk/inference/tableau.py
ADDED
@@ -0,0 +1,712 @@
# Natural Language Toolkit: First-Order Tableau Theorem Prover
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Dan Garrette <[email protected]>
#
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
Module for a tableau-based First Order theorem prover.
"""

from nltk.inference.api import BaseProverCommand, Prover
from nltk.internals import Counter
from nltk.sem.logic import (
    AbstractVariableExpression,
    AllExpression,
    AndExpression,
    ApplicationExpression,
    EqualityExpression,
    ExistsExpression,
    Expression,
    FunctionVariableExpression,
    IffExpression,
    ImpExpression,
    LambdaExpression,
    NegatedExpression,
    OrExpression,
    Variable,
    VariableExpression,
    unique_variable,
)

_counter = Counter()


class ProverParseError(Exception):
    pass


class TableauProver(Prover):
    _assume_false = False

    def _prove(self, goal=None, assumptions=None, verbose=False):
        if not assumptions:
            assumptions = []

        result = None
        try:
            agenda = Agenda()
            if goal:
                agenda.put(-goal)
            agenda.put_all(assumptions)
            debugger = Debug(verbose)
            result = self._attempt_proof(agenda, set(), set(), debugger)
        except RuntimeError as e:
            if self._assume_false and str(e).startswith(
                "maximum recursion depth exceeded"
            ):
                result = False
            else:
                if verbose:
                    print(e)
                else:
                    raise e
        return (result, "\n".join(debugger.lines))

    def _attempt_proof(self, agenda, accessible_vars, atoms, debug):
        (current, context), category = agenda.pop_first()

        # if there's nothing left in the agenda, and we haven't closed the path
        if not current:
            debug.line("AGENDA EMPTY")
            return False

        proof_method = {
            Categories.ATOM: self._attempt_proof_atom,
            Categories.PROP: self._attempt_proof_prop,
            Categories.N_ATOM: self._attempt_proof_n_atom,
            Categories.N_PROP: self._attempt_proof_n_prop,
            Categories.APP: self._attempt_proof_app,
            Categories.N_APP: self._attempt_proof_n_app,
            Categories.N_EQ: self._attempt_proof_n_eq,
            Categories.D_NEG: self._attempt_proof_d_neg,
            Categories.N_ALL: self._attempt_proof_n_all,
            Categories.N_EXISTS: self._attempt_proof_n_some,
            Categories.AND: self._attempt_proof_and,
            Categories.N_OR: self._attempt_proof_n_or,
            Categories.N_IMP: self._attempt_proof_n_imp,
            Categories.OR: self._attempt_proof_or,
            Categories.IMP: self._attempt_proof_imp,
            Categories.N_AND: self._attempt_proof_n_and,
            Categories.IFF: self._attempt_proof_iff,
            Categories.N_IFF: self._attempt_proof_n_iff,
            Categories.EQ: self._attempt_proof_eq,
            Categories.EXISTS: self._attempt_proof_some,
            Categories.ALL: self._attempt_proof_all,
        }[category]

        debug.line((current, context))
        return proof_method(current, context, agenda, accessible_vars, atoms, debug)

    def _attempt_proof_atom(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        # Check if the branch is closed.  Return 'True' if it is
        if (current, True) in atoms:
            debug.line("CLOSED", 1)
            return True

        if context:
            if isinstance(context.term, NegatedExpression):
                current = current.negate()
            agenda.put(context(current).simplify())
            return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
        else:
            # mark all AllExpressions as 'not exhausted' in the agenda since we are (potentially) adding new accessible vars
            agenda.mark_alls_fresh()
            return self._attempt_proof(
                agenda,
                accessible_vars | set(current.args),
                atoms | {(current, False)},
                debug + 1,
            )

    def _attempt_proof_n_atom(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        # Check if the branch is closed.  Return 'True' if it is
        if (current.term, False) in atoms:
            debug.line("CLOSED", 1)
            return True

        if context:
            if isinstance(context.term, NegatedExpression):
                current = current.negate()
            agenda.put(context(current).simplify())
            return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
        else:
            # mark all AllExpressions as 'not exhausted' in the agenda since we are (potentially) adding new accessible vars
            agenda.mark_alls_fresh()
            return self._attempt_proof(
                agenda,
                accessible_vars | set(current.term.args),
                atoms | {(current.term, True)},
                debug + 1,
            )

    def _attempt_proof_prop(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        # Check if the branch is closed.  Return 'True' if it is
        if (current, True) in atoms:
            debug.line("CLOSED", 1)
            return True

        # mark all AllExpressions as 'not exhausted' in the agenda since we are (potentially) adding new accessible vars
        agenda.mark_alls_fresh()
        return self._attempt_proof(
            agenda, accessible_vars, atoms | {(current, False)}, debug + 1
        )

    def _attempt_proof_n_prop(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        # Check if the branch is closed.  Return 'True' if it is
        if (current.term, False) in atoms:
            debug.line("CLOSED", 1)
            return True

        # mark all AllExpressions as 'not exhausted' in the agenda since we are (potentially) adding new accessible vars
        agenda.mark_alls_fresh()
        return self._attempt_proof(
            agenda, accessible_vars, atoms | {(current.term, True)}, debug + 1
        )

    def _attempt_proof_app(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        f, args = current.uncurry()
        for i, arg in enumerate(args):
            if not TableauProver.is_atom(arg):
                ctx = f
                nv = Variable("X%s" % _counter.get())
                for j, a in enumerate(args):
                    ctx = ctx(VariableExpression(nv)) if i == j else ctx(a)
                if context:
                    ctx = context(ctx).simplify()
                ctx = LambdaExpression(nv, ctx)
                agenda.put(arg, ctx)
                return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
        raise Exception("If this method is called, there must be a non-atomic argument")

    def _attempt_proof_n_app(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        f, args = current.term.uncurry()
        for i, arg in enumerate(args):
            if not TableauProver.is_atom(arg):
                ctx = f
                nv = Variable("X%s" % _counter.get())
                for j, a in enumerate(args):
                    ctx = ctx(VariableExpression(nv)) if i == j else ctx(a)
                if context:
                    # combine new context with existing
                    ctx = context(ctx).simplify()
                ctx = LambdaExpression(nv, -ctx)
                agenda.put(-arg, ctx)
                return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)
        raise Exception("If this method is called, there must be a non-atomic argument")

    def _attempt_proof_n_eq(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        ###########################################################################
        # Since 'current' is of type '~(a=b)', the path is closed if 'a' == 'b'
        ###########################################################################
        if current.term.first == current.term.second:
            debug.line("CLOSED", 1)
            return True

        agenda[Categories.N_EQ].add((current, context))
        current._exhausted = True
        return self._attempt_proof(
            agenda,
            accessible_vars | {current.term.first, current.term.second},
            atoms,
            debug + 1,
        )

    def _attempt_proof_d_neg(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        agenda.put(current.term.term, context)
        return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)

    def _attempt_proof_n_all(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        agenda[Categories.EXISTS].add(
            (ExistsExpression(current.term.variable, -current.term.term), context)
        )
        return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)

    def _attempt_proof_n_some(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        agenda[Categories.ALL].add(
            (AllExpression(current.term.variable, -current.term.term), context)
        )
        return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)

    def _attempt_proof_and(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        agenda.put(current.first, context)
        agenda.put(current.second, context)
        return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)

    def _attempt_proof_n_or(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        agenda.put(-current.term.first, context)
        agenda.put(-current.term.second, context)
        return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)

    def _attempt_proof_n_imp(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        agenda.put(current.term.first, context)
        agenda.put(-current.term.second, context)
        return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)

    def _attempt_proof_or(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        new_agenda = agenda.clone()
        agenda.put(current.first, context)
        new_agenda.put(current.second, context)
        return self._attempt_proof(
            agenda, accessible_vars, atoms, debug + 1
        ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1)

    def _attempt_proof_imp(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        new_agenda = agenda.clone()
        agenda.put(-current.first, context)
        new_agenda.put(current.second, context)
        return self._attempt_proof(
            agenda, accessible_vars, atoms, debug + 1
        ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1)

    def _attempt_proof_n_and(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        new_agenda = agenda.clone()
        agenda.put(-current.term.first, context)
        new_agenda.put(-current.term.second, context)
        return self._attempt_proof(
            agenda, accessible_vars, atoms, debug + 1
        ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1)

    def _attempt_proof_iff(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        new_agenda = agenda.clone()
        agenda.put(current.first, context)
        agenda.put(current.second, context)
        new_agenda.put(-current.first, context)
        new_agenda.put(-current.second, context)
        return self._attempt_proof(
            agenda, accessible_vars, atoms, debug + 1
        ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1)

    def _attempt_proof_n_iff(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        new_agenda = agenda.clone()
        agenda.put(current.term.first, context)
        agenda.put(-current.term.second, context)
        new_agenda.put(-current.term.first, context)
        new_agenda.put(current.term.second, context)
        return self._attempt_proof(
            agenda, accessible_vars, atoms, debug + 1
        ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1)

    def _attempt_proof_eq(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        #########################################################################
        # Since 'current' is of the form '(a = b)', replace ALL free instances
        # of 'a' with 'b'
        #########################################################################
        agenda.put_atoms(atoms)
        agenda.replace_all(current.first, current.second)
        accessible_vars.discard(current.first)
        agenda.mark_neqs_fresh()
        return self._attempt_proof(agenda, accessible_vars, set(), debug + 1)

    def _attempt_proof_some(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        new_unique_variable = VariableExpression(unique_variable())
        agenda.put(current.term.replace(current.variable, new_unique_variable), context)
        agenda.mark_alls_fresh()
        return self._attempt_proof(
            agenda, accessible_vars | {new_unique_variable}, atoms, debug + 1
        )

    def _attempt_proof_all(
        self, current, context, agenda, accessible_vars, atoms, debug
    ):
        try:
            current._used_vars
        except AttributeError:
            current._used_vars = set()

        # if there are accessible_vars on the path
        if accessible_vars:
            # get the set of bound variables that have not been used by this AllExpression
            bv_available = accessible_vars - current._used_vars

            if bv_available:
                variable_to_use = list(bv_available)[0]
                debug.line("--> Using '%s'" % variable_to_use, 2)
                current._used_vars |= {variable_to_use}
                agenda.put(
                    current.term.replace(current.variable, variable_to_use), context
                )
                agenda[Categories.ALL].add((current, context))
                return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)

            else:
                # no more available variables to substitute
                debug.line("--> Variables Exhausted", 2)
                current._exhausted = True
                agenda[Categories.ALL].add((current, context))
                return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1)

        else:
            new_unique_variable = VariableExpression(unique_variable())
            debug.line("--> Using '%s'" % new_unique_variable, 2)
            current._used_vars |= {new_unique_variable}
            agenda.put(
                current.term.replace(current.variable, new_unique_variable), context
            )
            agenda[Categories.ALL].add((current, context))
            agenda.mark_alls_fresh()
            return self._attempt_proof(
                agenda, accessible_vars | {new_unique_variable}, atoms, debug + 1
            )

    @staticmethod
    def is_atom(e):
        if isinstance(e, NegatedExpression):
            e = e.term

        if isinstance(e, ApplicationExpression):
            for arg in e.args:
                if not TableauProver.is_atom(arg):
                    return False
            return True
        elif isinstance(e, AbstractVariableExpression) or isinstance(
            e, LambdaExpression
        ):
            return True
        else:
            return False


class TableauProverCommand(BaseProverCommand):
    def __init__(self, goal=None, assumptions=None, prover=None):
        """
        :param goal: Input expression to prove
        :type goal: sem.Expression
        :param assumptions: Input expressions to use as assumptions in
            the proof.
        :type assumptions: list(sem.Expression)
        """
        if prover is not None:
            assert isinstance(prover, TableauProver)
        else:
            prover = TableauProver()

        BaseProverCommand.__init__(self, prover, goal, assumptions)


class Agenda:
    def __init__(self):
        self.sets = tuple(set() for i in range(21))

    def clone(self):
        new_agenda = Agenda()
        set_list = [s.copy() for s in self.sets]

        new_allExs = set()
        for allEx, _ in set_list[Categories.ALL]:
            new_allEx = AllExpression(allEx.variable, allEx.term)
            try:
                new_allEx._used_vars = {used for used in allEx._used_vars}
            except AttributeError:
                new_allEx._used_vars = set()
            new_allExs.add((new_allEx, None))
        set_list[Categories.ALL] = new_allExs

        set_list[Categories.N_EQ] = {
            (NegatedExpression(n_eq.term), ctx)
            for (n_eq, ctx) in set_list[Categories.N_EQ]
        }

        new_agenda.sets = tuple(set_list)
        return new_agenda

    def __getitem__(self, index):
        return self.sets[index]

    def put(self, expression, context=None):
        if isinstance(expression, AllExpression):
            ex_to_add = AllExpression(expression.variable, expression.term)
            try:
                ex_to_add._used_vars = {used for used in expression._used_vars}
            except AttributeError:
                ex_to_add._used_vars = set()
        else:
            ex_to_add = expression
        self.sets[self._categorize_expression(ex_to_add)].add((ex_to_add, context))

    def put_all(self, expressions):
        for expression in expressions:
            self.put(expression)

    def put_atoms(self, atoms):
        for atom, neg in atoms:
            if neg:
                self[Categories.N_ATOM].add((-atom, None))
            else:
                self[Categories.ATOM].add((atom, None))

    def pop_first(self):
        """Pop the first expression that appears in the agenda"""
        for i, s in enumerate(self.sets):
            if s:
                if i in [Categories.N_EQ, Categories.ALL]:
                    for ex in s:
                        try:
                            if not ex[0]._exhausted:
                                s.remove(ex)
                                return (ex, i)
                        except AttributeError:
                            s.remove(ex)
                            return (ex, i)
                else:
                    return (s.pop(), i)
        return ((None, None), None)

    def replace_all(self, old, new):
        for s in self.sets:
            for ex, ctx in s:
                ex.replace(old.variable, new)
                if ctx is not None:
                    ctx.replace(old.variable, new)

    def mark_alls_fresh(self):
        for u, _ in self.sets[Categories.ALL]:
            u._exhausted = False

    def mark_neqs_fresh(self):
        for neq, _ in self.sets[Categories.N_EQ]:
            neq._exhausted = False

    def _categorize_expression(self, current):
        if isinstance(current, NegatedExpression):
            return self._categorize_NegatedExpression(current)
        elif isinstance(current, FunctionVariableExpression):
            return Categories.PROP
        elif TableauProver.is_atom(current):
            return Categories.ATOM
        elif isinstance(current, AllExpression):
            return Categories.ALL
        elif isinstance(current, AndExpression):
            return Categories.AND
        elif isinstance(current, OrExpression):
            return Categories.OR
        elif isinstance(current, ImpExpression):
            return Categories.IMP
        elif isinstance(current, IffExpression):
            return Categories.IFF
        elif isinstance(current, EqualityExpression):
            return Categories.EQ
        elif isinstance(current, ExistsExpression):
            return Categories.EXISTS
        elif isinstance(current, ApplicationExpression):
            return Categories.APP
        else:
            raise ProverParseError("cannot categorize %s" % current.__class__.__name__)

    def _categorize_NegatedExpression(self, current):
        negated = current.term

        if isinstance(negated, NegatedExpression):
            return Categories.D_NEG
        elif isinstance(negated, FunctionVariableExpression):
            return Categories.N_PROP
        elif TableauProver.is_atom(negated):
            return Categories.N_ATOM
        elif isinstance(negated, AllExpression):
            return Categories.N_ALL
        elif isinstance(negated, AndExpression):
            return Categories.N_AND
        elif isinstance(negated, OrExpression):
            return Categories.N_OR
        elif isinstance(negated, ImpExpression):
            return Categories.N_IMP
        elif isinstance(negated, IffExpression):
            return Categories.N_IFF
        elif isinstance(negated, EqualityExpression):
            return Categories.N_EQ
        elif isinstance(negated, ExistsExpression):
            return Categories.N_EXISTS
        elif isinstance(negated, ApplicationExpression):
            return Categories.N_APP
        else:
            raise ProverParseError("cannot categorize %s" % negated.__class__.__name__)


class Debug:
    def __init__(self, verbose, indent=0, lines=None):
        self.verbose = verbose
        self.indent = indent

        if not lines:
            lines = []
        self.lines = lines

    def __add__(self, increment):
        return Debug(self.verbose, self.indent + 1, self.lines)

    def line(self, data, indent=0):
        if isinstance(data, tuple):
            ex, ctx = data
            if ctx:
                data = f"{ex}, {ctx}"
            else:
                data = "%s" % ex

            if isinstance(ex, AllExpression):
                try:
                    used_vars = "[%s]" % (
                        ",".join("%s" % ve.variable.name for ve in ex._used_vars)
                    )
                    data += ":   %s" % used_vars
                except AttributeError:
                    data += ":   []"

        newline = "{}{}".format("   " * (self.indent + indent), data)
        self.lines.append(newline)

        if self.verbose:
            print(newline)


class Categories:
    ATOM = 0
    PROP = 1
    N_ATOM = 2
    N_PROP = 3
    APP = 4
    N_APP = 5
    N_EQ = 6
    D_NEG = 7
    N_ALL = 8
    N_EXISTS = 9
    AND = 10
    N_OR = 11
    N_IMP = 12
    OR = 13
    IMP = 14
    N_AND = 15
    IFF = 16
    N_IFF = 17
    EQ = 18
    EXISTS = 19
    ALL = 20


def testTableauProver():
    tableau_test("P | -P")
    tableau_test("P & -P")
    tableau_test("Q", ["P", "(P -> Q)"])
    tableau_test("man(x)")
    tableau_test("(man(x) -> man(x))")
    tableau_test("(man(x) -> --man(x))")
    tableau_test("-(man(x) and -man(x))")
    tableau_test("(man(x) or -man(x))")
    tableau_test("(man(x) -> man(x))")
    tableau_test("-(man(x) and -man(x))")
    tableau_test("(man(x) or -man(x))")
    tableau_test("(man(x) -> man(x))")
    tableau_test("(man(x) iff man(x))")
    tableau_test("-(man(x) iff -man(x))")
    tableau_test("all x.man(x)")
    tableau_test("all x.all y.((x = y) -> (y = x))")
    tableau_test("all x.all y.all z.(((x = y) & (y = z)) -> (x = z))")
    # tableau_test('-all x.some y.F(x,y) & some x.all y.(-F(x,y))')
    # tableau_test('some x.all y.sees(x,y)')

    p1 = "all x.(man(x) -> mortal(x))"
    p2 = "man(Socrates)"
    c = "mortal(Socrates)"
    tableau_test(c, [p1, p2])

    p1 = "all x.(man(x) -> walks(x))"
    p2 = "man(John)"
    c = "some y.walks(y)"
    tableau_test(c, [p1, p2])

    p = "((x = y) & walks(y))"
    c = "walks(x)"
    tableau_test(c, [p])

    p = "((x = y) & ((y = z) & (z = w)))"
    c = "(x = w)"
    tableau_test(c, [p])

    p = "some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))"
    c = "some e0.walk(e0,mary)"
    tableau_test(c, [p])

    c = "(exists x.exists z3.((x = Mary) & ((z3 = John) & sees(z3,x))) <-> exists x.exists z4.((x = John) & ((z4 = Mary) & sees(x,z4))))"
    tableau_test(c)


#    p = 'some e1.some e2.((believe e1 john e2) and (walk e2 mary))'
#    c = 'some x.some e3.some e4.((believe e3 x e4) and (walk e4 mary))'
#    tableau_test(c, [p])


def testHigherOrderTableauProver():
    tableau_test("believe(j, -lie(b))", ["believe(j, -lie(b) & -cheat(b))"])
    tableau_test("believe(j, lie(b) & cheat(b))", ["believe(j, lie(b))"])
    tableau_test(
        "believe(j, lie(b))", ["lie(b)"]
    )  # how do we capture that John believes all things that are true
    tableau_test(
        "believe(j, know(b, cheat(b)))",
        ["believe(j, know(b, lie(b)) & know(b, steals(b) & cheat(b)))"],
    )
    tableau_test("P(Q(y), R(y) & R(z))", ["P(Q(x) & Q(y), R(y) & R(z))"])

    tableau_test("believe(j, cheat(b) & lie(b))", ["believe(j, lie(b) & cheat(b))"])
    tableau_test("believe(j, -cheat(b) & -lie(b))", ["believe(j, -lie(b) & -cheat(b))"])


def tableau_test(c, ps=None, verbose=False):
    pc = Expression.fromstring(c)
    pps = [Expression.fromstring(p) for p in ps] if ps else []
    if not ps:
        ps = []
    print(
        "%s |- %s: %s"
        % (", ".join(ps), pc, TableauProver().prove(pc, pps, verbose=verbose))
    )


def demo():
    testTableauProver()
    testHigherOrderTableauProver()


if __name__ == "__main__":
    demo()
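A minimal usage sketch, mirroring `tableau_test` above; the Socrates formulas are taken from `testTableauProver`, and `prove` comes from the `Prover` base class in `nltk.inference.api`. Unlike the Prover9 interface, this needs no external binary:

from nltk.sem.logic import Expression
from nltk.inference.tableau import TableauProver

# Modus ponens over a universally quantified premise.
goal = Expression.fromstring("mortal(Socrates)")
assumptions = [
    Expression.fromstring("all x.(man(x) -> mortal(x))"),
    Expression.fromstring("man(Socrates)"),
]
# prove() returns the boolean first element of _prove()'s result pair.
print(TableauProver().prove(goal, assumptions))  # True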
venv/lib/python3.10/site-packages/nltk/test/__init__.py
ADDED
@@ -0,0 +1,18 @@
# Natural Language Toolkit: Unit Tests
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
Unit tests for the NLTK modules.  These tests are intended to ensure
that source code changes don't accidentally introduce bugs.
For instructions, please see:

../../web/dev/local_testing.rst

https://github.com/nltk/nltk/blob/develop/web/dev/local_testing.rst


"""
venv/lib/python3.10/site-packages/nltk/test/all.py
ADDED
@@ -0,0 +1,25 @@
"""Test suite that runs all NLTK tests.

This module, `nltk.test.all`, is named as the NLTK ``test_suite`` in the
project's ``setup-eggs.py`` file.  Here, we create a test suite that
runs all of our doctests, and return it for processing by the setuptools
test harness.

"""
import doctest
import os.path
import unittest
from glob import glob


def additional_tests():
    # print("here-000000000000000")
    # print("-----", glob(os.path.join(os.path.dirname(__file__), '*.doctest')))
    dir = os.path.dirname(__file__)
    paths = glob(os.path.join(dir, "*.doctest"))
    files = [os.path.basename(path) for path in paths]
    return unittest.TestSuite([doctest.DocFileSuite(file) for file in files])


# if os.path.split(path)[-1] != 'index.rst'
# skips time-dependent doctest in index.rst
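A hedged sketch of driving this suite directly. `doctest.DocFileSuite` resolves the relative filenames against the calling module's directory (here, `nltk/test`), so no directory change is needed; note that many of the collected doctests assume NLTK corpora and optional third-party tools are installed, so expect failures in a bare environment:

import unittest
from nltk.test.all import additional_tests

# Collect every *.doctest file in nltk/test into one unittest suite
# and run it with the standard text runner.
suite = additional_tests()
unittest.TextTestRunner(verbosity=1).run(suite)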
venv/lib/python3.10/site-packages/nltk/test/bleu.doctest
ADDED
@@ -0,0 +1,29 @@
==========
BLEU tests
==========

>>> from nltk.translate import bleu

If the candidate has no alignment to any of the references, the BLEU score is 0.

>>> bleu(
...     ['The candidate has no alignment to any of the references'.split()],
...     'John loves Mary'.split(),
...     (1,),
... )
0

This is an implementation of the smoothing techniques
for segment-level BLEU scores that was presented in
Boxing Chen and Collin Cherry (2014) A Systematic Comparison of
Smoothing Techniques for Sentence-Level BLEU. In WMT14.
http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
>>> from nltk.translate.bleu_score import sentence_bleu,SmoothingFunction


>>> sentence_bleu(
...     ['It is a place of quiet contemplation .'.split()],
...     'It is .'.split(),
...     smoothing_function=SmoothingFunction().method4,
... )*100
4.4267...
venv/lib/python3.10/site-packages/nltk/test/bnc.doctest
ADDED
@@ -0,0 +1,60 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

>>> import os.path

>>> from nltk.corpus.reader import BNCCorpusReader
>>> import nltk.test

>>> root = os.path.dirname(nltk.test.__file__)
>>> bnc = BNCCorpusReader(root=root, fileids='FX8.xml')

Checking the word access.
-------------------------

>>> len(bnc.words())
151

>>> bnc.words()[:6]
['Ah', 'there', 'we', 'are', ',', '.']
>>> bnc.words(stem=True)[:6]
['ah', 'there', 'we', 'be', ',', '.']

>>> bnc.tagged_words()[:6]
[('Ah', 'INTERJ'), ('there', 'ADV'), ('we', 'PRON'), ('are', 'VERB'), (',', 'PUN'), ('.', 'PUN')]

>>> bnc.tagged_words(c5=True)[:6]
[('Ah', 'ITJ'), ('there', 'AV0'), ('we', 'PNP'), ('are', 'VBB'), (',', 'PUN'), ('.', 'PUN')]

Testing access to the sentences.
--------------------------------

>>> len(bnc.sents())
15

>>> bnc.sents()[0]
['Ah', 'there', 'we', 'are', ',', '.']
>>> bnc.sents(stem=True)[0]
['ah', 'there', 'we', 'be', ',', '.']

>>> bnc.tagged_sents()[0]
[('Ah', 'INTERJ'), ('there', 'ADV'), ('we', 'PRON'), ('are', 'VERB'), (',', 'PUN'), ('.', 'PUN')]
>>> bnc.tagged_sents(c5=True)[0]
[('Ah', 'ITJ'), ('there', 'AV0'), ('we', 'PNP'), ('are', 'VBB'), (',', 'PUN'), ('.', 'PUN')]

A non-lazy loader.
------------------

>>> eager = BNCCorpusReader(root=root, fileids=r'FX8.xml', lazy=False)

>>> len(eager.words())
151
>>> eager.words(stem=True)[6:17]
['right', 'abdominal', 'wound', ',', 'she', 'be', 'a', 'wee', 'bit', 'confuse', '.']

>>> eager.tagged_words()[6:11]
[('Right', 'ADV'), ('abdominal', 'ADJ'), ('wound', 'SUBST'), (',', 'PUN'), ('she', 'PRON')]
>>> eager.tagged_words(c5=True)[6:17]
[('Right', 'AV0'), ('abdominal', 'AJ0'), ('wound', 'NN1'), (',', 'PUN'), ('she', 'PNP'), ("'s", 'VBZ'), ('a', 'AT0'), ('wee', 'AJ0-NN1'), ('bit', 'NN1'), ('confused', 'VVN-AJ0'), ('.', 'PUN')]
>>> len(eager.sents())
15
venv/lib/python3.10/site-packages/nltk/test/ccg.doctest
ADDED
@@ -0,0 +1,376 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
==============================
|
5 |
+
Combinatory Categorial Grammar
|
6 |
+
==============================
|
7 |
+
|
8 |
+
Relative Clauses
|
9 |
+
----------------
|
10 |
+
|
11 |
+
>>> from nltk.ccg import chart, lexicon
|
12 |
+
|
13 |
+
Construct a lexicon:
|
14 |
+
|
15 |
+
>>> lex = lexicon.fromstring('''
|
16 |
+
... :- S, NP, N, VP
|
17 |
+
...
|
18 |
+
... Det :: NP/N
|
19 |
+
... Pro :: NP
|
20 |
+
... Modal :: S\\NP/VP
|
21 |
+
...
|
22 |
+
... TV :: VP/NP
|
23 |
+
... DTV :: TV/NP
|
24 |
+
...
|
25 |
+
... the => Det
|
26 |
+
...
|
27 |
+
... that => Det
|
28 |
+
... that => NP
|
29 |
+
...
|
30 |
+
... I => Pro
|
31 |
+
... you => Pro
|
32 |
+
... we => Pro
|
33 |
+
...
|
34 |
+
... chef => N
|
35 |
+
... cake => N
|
36 |
+
... children => N
|
37 |
+
... dough => N
|
38 |
+
...
|
39 |
+
... will => Modal
|
40 |
+
... should => Modal
|
41 |
+
... might => Modal
|
42 |
+
... must => Modal
|
43 |
+
...
|
44 |
+
... and => var\\.,var/.,var
|
45 |
+
...
|
46 |
+
... to => VP[to]/VP
|
47 |
+
...
|
48 |
+
... without => (VP\\VP)/VP[ing]
|
49 |
+
...
|
50 |
+
... be => TV
|
51 |
+
... cook => TV
|
52 |
+
... eat => TV
|
53 |
+
...
|
54 |
+
... cooking => VP[ing]/NP
|
55 |
+
...
|
56 |
+
... give => DTV
|
57 |
+
...
|
58 |
+
... is => (S\\NP)/NP
|
59 |
+
... prefer => (S\\NP)/NP
|
60 |
+
...
|
61 |
+
... which => (N\\N)/(S/NP)
|
62 |
+
...
|
63 |
+
... persuade => (VP/VP[to])/NP
|
64 |
+
... ''')
|
65 |
+
|
66 |
+
>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
|
67 |
+
>>> for parse in parser.parse("you prefer that cake".split()):
|
68 |
+
... chart.printCCGDerivation(parse)
|
69 |
+
... break
|
70 |
+
...
|
71 |
+
you prefer that cake
|
72 |
+
NP ((S\NP)/NP) (NP/N) N
|
73 |
+
-------------->
|
74 |
+
NP
|
75 |
+
--------------------------->
|
76 |
+
(S\NP)
|
77 |
+
--------------------------------<
|
78 |
+
S
|
79 |
+
|
80 |
+
>>> for parse in parser.parse("that is the cake which you prefer".split()):
|
81 |
+
... chart.printCCGDerivation(parse)
|
82 |
+
... break
|
83 |
+
...
|
84 |
+
that is the cake which you prefer
|
85 |
+
NP ((S\NP)/NP) (NP/N) N ((N\N)/(S/NP)) NP ((S\NP)/NP)
|
86 |
+
----->T
|
87 |
+
(S/(S\NP))
|
88 |
+
------------------>B
|
89 |
+
(S/NP)
|
90 |
+
---------------------------------->
|
91 |
+
(N\N)
|
92 |
+
----------------------------------------<
|
93 |
+
N
|
94 |
+
------------------------------------------------>
|
95 |
+
NP
|
96 |
+
------------------------------------------------------------->
|
97 |
+
(S\NP)
|
98 |
+
-------------------------------------------------------------------<
|
99 |
+
S
|
100 |
+
|
101 |
+
|
102 |
+
Some other sentences to try:
|
103 |
+
"that is the cake which we will persuade the chef to cook"
|
104 |
+
"that is the cake which we will persuade the chef to give the children"
|
105 |
+
|
106 |
+
>>> sent = "that is the dough which you will eat without cooking".split()
|
107 |
+
>>> nosub_parser = chart.CCGChartParser(lex, chart.ApplicationRuleSet +
|
108 |
+
... chart.CompositionRuleSet + chart.TypeRaiseRuleSet)
|
109 |
+
|
110 |
+
Without Substitution (no output)
|
111 |
+
|
112 |
+
>>> for parse in nosub_parser.parse(sent):
|
113 |
+
... chart.printCCGDerivation(parse)
|
114 |
+
|
115 |
+
With Substitution:
|
116 |
+
|
117 |
+
>>> for parse in parser.parse(sent):
|
118 |
+
... chart.printCCGDerivation(parse)
|
119 |
+
... break
|
120 |
+
...
|
121 |
+
that is the dough which you will eat without cooking
|
122 |
+
NP ((S\NP)/NP) (NP/N) N ((N\N)/(S/NP)) NP ((S\NP)/VP) (VP/NP) ((VP\VP)/VP['ing']) (VP['ing']/NP)
|
123 |
+
----->T
|
124 |
+
(S/(S\NP))
|
125 |
+
------------------------------------->B
|
126 |
+
((VP\VP)/NP)
|
127 |
+
----------------------------------------------<Sx
|
128 |
+
(VP/NP)
|
129 |
+
----------------------------------------------------------->B
|
130 |
+
((S\NP)/NP)
|
131 |
+
---------------------------------------------------------------->B
|
132 |
+
(S/NP)
|
133 |
+
-------------------------------------------------------------------------------->
|
134 |
+
(N\N)
|
135 |
+
---------------------------------------------------------------------------------------<
|
136 |
+
N
|
137 |
+
----------------------------------------------------------------------------------------------->
|
138 |
+
NP
|
139 |
+
------------------------------------------------------------------------------------------------------------>
|
140 |
+
(S\NP)
|
141 |
+
------------------------------------------------------------------------------------------------------------------<
|
142 |
+
S
|
143 |
+
|
144 |
+
|
145 |
+
Conjunction
|
146 |
+
-----------
|
147 |
+
|
148 |
+
>>> from nltk.ccg.chart import CCGChartParser, ApplicationRuleSet, CompositionRuleSet
|
149 |
+
>>> from nltk.ccg.chart import SubstitutionRuleSet, TypeRaiseRuleSet, printCCGDerivation
|
150 |
+
>>> from nltk.ccg import lexicon
|
151 |
+
|
152 |
+
Lexicons for the tests:
|
153 |
+
|
154 |
+
>>> test1_lex = '''
|
155 |
+
... :- S,N,NP,VP
|
156 |
+
... I => NP
|
157 |
+
... you => NP
|
158 |
+
... will => S\\NP/VP
|
159 |
+
... cook => VP/NP
|
160 |
+
... which => (N\\N)/(S/NP)
|
161 |
+
... and => var\\.,var/.,var
|
162 |
+
... might => S\\NP/VP
|
163 |
+
... eat => VP/NP
|
164 |
+
... the => NP/N
|
165 |
+
... mushrooms => N
|
166 |
+
... parsnips => N'''
|
167 |
+
>>> test2_lex = '''
|
168 |
+
... :- N, S, NP, VP
|
169 |
+
... articles => N
|
170 |
+
... the => NP/N
|
171 |
+
... and => var\\.,var/.,var
|
172 |
+
... which => (N\\N)/(S/NP)
|
173 |
+
... I => NP
|
174 |
+
... anyone => NP
|
175 |
+
... will => (S/VP)\\NP
|
176 |
+
... file => VP/NP
|
177 |
+
... without => (VP\\VP)/VP[ing]
|
178 |
+
... forget => VP/NP
|
179 |
+
... reading => VP[ing]/NP
|
180 |
+
... '''
|
181 |
+
|
182 |
+
Tests handling of conjunctions.
|
183 |
+
Note that while the two derivations are different, they are semantically equivalent.
|
184 |
+
|
185 |
+
>>> lex = lexicon.fromstring(test1_lex)
|
186 |
+
>>> parser = CCGChartParser(lex, ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet)
|
187 |
+
>>> for parse in parser.parse("I will cook and might eat the mushrooms and parsnips".split()):
|
188 |
+
... printCCGDerivation(parse)
|
189 |
+
I will cook and might eat the mushrooms and parsnips
|
190 |
+
NP ((S\NP)/VP) (VP/NP) ((_var0\.,_var0)/.,_var0) ((S\NP)/VP) (VP/NP) (NP/N) N ((_var0\.,_var0)/.,_var0) N
|
191 |
+
---------------------->B
|
192 |
+
((S\NP)/NP)
|
193 |
+
---------------------->B
|
194 |
+
((S\NP)/NP)
|
195 |
+
------------------------------------------------->
|
196 |
+
(((S\NP)/NP)\.,((S\NP)/NP))
|
197 |
+
-----------------------------------------------------------------------<
|
198 |
+
((S\NP)/NP)
|
199 |
+
------------------------------------->
|
200 |
+
(N\.,N)
|
201 |
+
------------------------------------------------<
|
202 |
+
N
|
203 |
+
-------------------------------------------------------->
|
204 |
+
NP
|
205 |
+
------------------------------------------------------------------------------------------------------------------------------->
|
206 |
+
(S\NP)
|
207 |
+
-----------------------------------------------------------------------------------------------------------------------------------<
|
208 |
+
S
|
209 |
+
I will cook and might eat the mushrooms and parsnips
|
210 |
+
NP ((S\NP)/VP) (VP/NP) ((_var0\.,_var0)/.,_var0) ((S\NP)/VP) (VP/NP) (NP/N) N ((_var0\.,_var0)/.,_var0) N
|
211 |
+
---------------------->B
|
212 |
+
((S\NP)/NP)
|
213 |
+
---------------------->B
|
214 |
+
((S\NP)/NP)
|
215 |
+
------------------------------------------------->
|
216 |
+
(((S\NP)/NP)\.,((S\NP)/NP))
|
217 |
+
-----------------------------------------------------------------------<
|
218 |
+
((S\NP)/NP)
|
219 |
+
------------------------------------------------------------------------------->B
|
220 |
+
((S\NP)/N)
|
221 |
+
------------------------------------->
|
222 |
+
(N\.,N)
|
223 |
+
------------------------------------------------<
|
224 |
+
N
|
225 |
+
------------------------------------------------------------------------------------------------------------------------------->
|
226 |
+
(S\NP)
|
227 |
+
-----------------------------------------------------------------------------------------------------------------------------------<
|
228 |
+
S
|
229 |
+
|
230 |
+
|
231 |
+
Tests handling subject extraction.
|
232 |
+
Interesting to point that the two parses are clearly semantically different.
|
233 |
+
|
>>> lex = lexicon.fromstring(test2_lex)
>>> parser = CCGChartParser(lex, ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet)
>>> for parse in parser.parse("articles which I will file and forget without reading".split()):
...     printCCGDerivation(parse)
articles which I will file and forget without reading
N ((N\N)/(S/NP)) NP ((S/VP)\NP) (VP/NP) ((_var0\.,_var0)/.,_var0) (VP/NP) ((VP\VP)/VP['ing']) (VP['ing']/NP)
-----------------<
(S/VP)
------------------------------------->B
((VP\VP)/NP)
----------------------------------------------<Sx
(VP/NP)
------------------------------------------------------------------------->
((VP/NP)\.,(VP/NP))
----------------------------------------------------------------------------------<
(VP/NP)
--------------------------------------------------------------------------------------------------->B
(S/NP)
------------------------------------------------------------------------------------------------------------------->
(N\N)
-----------------------------------------------------------------------------------------------------------------------------<
N
articles which I will file and forget without reading
N ((N\N)/(S/NP)) NP ((S/VP)\NP) (VP/NP) ((_var0\.,_var0)/.,_var0) (VP/NP) ((VP\VP)/VP['ing']) (VP['ing']/NP)
-----------------<
(S/VP)
------------------------------------>
((VP/NP)\.,(VP/NP))
---------------------------------------------<
(VP/NP)
------------------------------------->B
((VP\VP)/NP)
----------------------------------------------------------------------------------<Sx
(VP/NP)
--------------------------------------------------------------------------------------------------->B
(S/NP)
------------------------------------------------------------------------------------------------------------------->
(N\N)
-----------------------------------------------------------------------------------------------------------------------------<
N


Unicode support
---------------

Unicode words are supported.

>>> from nltk.ccg import chart, lexicon

Lexicons for the tests:

>>> lex = lexicon.fromstring('''
... :- S, N, NP, PP
...
... AdjI :: N\\N
... AdjD :: N/N
... AdvD :: S/S
... AdvI :: S\\S
... Det :: NP/N
... PrepNPCompl :: PP/NP
... PrepNAdjN :: S\\S/N
... PrepNAdjNP :: S\\S/NP
... VPNP :: S\\NP/NP
... VPPP :: S\\NP/PP
... VPser :: S\\NP/AdjI
...
... auto => N
... bebidas => N
... cine => N
... ley => N
... libro => N
... ministro => N
... panadería => N
... presidente => N
... super => N
...
... el => Det
... la => Det
... las => Det
... un => Det
...
... Ana => NP
... Pablo => NP
...
... y => var\\.,var/.,var
...
... pero => (S/NP)\\(S/NP)/(S/NP)
...
... anunció => VPNP
... compró => VPNP
... cree => S\\NP/S[dep]
... desmintió => VPNP
... lee => VPNP
... fueron => VPPP
...
... es => VPser
...
... interesante => AdjD
... interesante => AdjI
... nueva => AdjD
... nueva => AdjI
...
... a => PrepNPCompl
... en => PrepNAdjN
... en => PrepNAdjNP
...
... ayer => AdvI
...
... que => (NP\\NP)/(S/NP)
... que => S[dep]/S
... ''')

>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
>>> for parse in parser.parse(u"el ministro anunció pero el presidente desmintió la nueva ley".split()):
...     printCCGDerivation(parse) # doctest: +SKIP
...     # it fails on python2.7 because of the unicode problem explained in https://github.com/nltk/nltk/pull/1354
...     break
el ministro anunció pero el presidente desmintió la nueva ley
(NP/N) N ((S\NP)/NP) (((S/NP)\(S/NP))/(S/NP)) (NP/N) N ((S\NP)/NP) (NP/N) (N/N) N
------------------>
NP
------------------>T
(S/(S\NP))
-------------------->
NP
-------------------->T
(S/(S\NP))
--------------------------------->B
(S/NP)
----------------------------------------------------------->
((S/NP)\(S/NP))
------------>
N
-------------------->
NP
--------------------<T
(S\(S/NP))
-------------------------------------------------------------------------------<B
(S\(S/NP))
--------------------------------------------------------------------------------------------<B
(S/NP)
-------------------------------------------------------------------------------------------------------------->
S
venv/lib/python3.10/site-packages/nltk/test/ccg_semantics.doctest ADDED
@@ -0,0 +1,552 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

==============================================
Combinatory Categorial Grammar with semantics
==============================================

-----
Chart
-----

>>> from nltk.ccg import chart, lexicon
>>> from nltk.ccg.chart import printCCGDerivation

No semantics
-------------------

>>> lex = lexicon.fromstring('''
... :- S, NP, N
... She => NP
... has => (S\\NP)/NP
... books => NP
... ''',
... False)

>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
>>> parses = list(parser.parse("She has books".split()))
>>> print(str(len(parses)) + " parses")
3 parses

>>> printCCGDerivation(parses[0])
She has books
NP ((S\NP)/NP) NP
-------------------->
(S\NP)
-------------------------<
S

>>> printCCGDerivation(parses[1])
She has books
NP ((S\NP)/NP) NP
----->T
(S/(S\NP))
-------------------->
(S\NP)
------------------------->
S

>>> printCCGDerivation(parses[2])
She has books
NP ((S\NP)/NP) NP
----->T
(S/(S\NP))
------------------>B
(S/NP)
------------------------->
S

Simple semantics
-------------------

>>> lex = lexicon.fromstring('''
... :- S, NP, N
... She => NP {she}
... has => (S\\NP)/NP {\\x y.have(y, x)}
... a => NP/N {\\P.exists z.P(z)}
... book => N {book}
... ''',
... True)

>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
>>> parses = list(parser.parse("She has a book".split()))
>>> print(str(len(parses)) + " parses")
7 parses

>>> printCCGDerivation(parses[0])
She has a book
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
------------------------------------->
NP {exists z.book(z)}
------------------------------------------------------------------->
(S\NP) {\y.have(y,exists z.book(z))}
-----------------------------------------------------------------------------<
S {have(she,exists z.book(z))}

>>> printCCGDerivation(parses[1])
She has a book
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
--------------------------------------------------------->B
((S\NP)/N) {\P y.have(y,exists z.P(z))}
------------------------------------------------------------------->
(S\NP) {\y.have(y,exists z.book(z))}
-----------------------------------------------------------------------------<
S {have(she,exists z.book(z))}

>>> printCCGDerivation(parses[2])
She has a book
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
---------->T
(S/(S\NP)) {\F.F(she)}
------------------------------------->
NP {exists z.book(z)}
------------------------------------------------------------------->
(S\NP) {\y.have(y,exists z.book(z))}
----------------------------------------------------------------------------->
S {have(she,exists z.book(z))}

>>> printCCGDerivation(parses[3])
She has a book
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
---------->T
(S/(S\NP)) {\F.F(she)}
--------------------------------------------------------->B
((S\NP)/N) {\P y.have(y,exists z.P(z))}
------------------------------------------------------------------->
(S\NP) {\y.have(y,exists z.book(z))}
----------------------------------------------------------------------------->
S {have(she,exists z.book(z))}

>>> printCCGDerivation(parses[4])
She has a book
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
---------->T
(S/(S\NP)) {\F.F(she)}
---------------------------------------->B
(S/NP) {\x.have(she,x)}
------------------------------------->
NP {exists z.book(z)}
----------------------------------------------------------------------------->
S {have(she,exists z.book(z))}

>>> printCCGDerivation(parses[5])
She has a book
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
---------->T
(S/(S\NP)) {\F.F(she)}
--------------------------------------------------------->B
((S\NP)/N) {\P y.have(y,exists z.P(z))}
------------------------------------------------------------------->B
(S/N) {\P.have(she,exists z.P(z))}
----------------------------------------------------------------------------->
S {have(she,exists z.book(z))}

>>> printCCGDerivation(parses[6])
She has a book
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
---------->T
(S/(S\NP)) {\F.F(she)}
---------------------------------------->B
(S/NP) {\x.have(she,x)}
------------------------------------------------------------------->B
(S/N) {\P.have(she,exists z.P(z))}
----------------------------------------------------------------------------->
S {have(she,exists z.book(z))}

Complex semantics
-------------------

>>> lex = lexicon.fromstring('''
... :- S, NP, N
... She => NP {she}
... has => (S\\NP)/NP {\\x y.have(y, x)}
... a => ((S\\NP)\\((S\\NP)/NP))/N {\\P R x.(exists z.P(z) & R(z,x))}
... book => N {book}
... ''',
... True)

>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
>>> parses = list(parser.parse("She has a book".split()))
>>> print(str(len(parses)) + " parses")
2 parses

>>> printCCGDerivation(parses[0])
She has a book
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (((S\NP)\((S\NP)/NP))/N) {\P R x.(exists z.P(z) & R(z,x))} N {book}
---------------------------------------------------------------------->
((S\NP)\((S\NP)/NP)) {\R x.(exists z.book(z) & R(z,x))}
----------------------------------------------------------------------------------------------------<
(S\NP) {\x.(exists z.book(z) & have(x,z))}
--------------------------------------------------------------------------------------------------------------<
S {(exists z.book(z) & have(she,z))}

>>> printCCGDerivation(parses[1])
She has a book
NP {she} ((S\NP)/NP) {\x y.have(y,x)} (((S\NP)\((S\NP)/NP))/N) {\P R x.(exists z.P(z) & R(z,x))} N {book}
---------->T
(S/(S\NP)) {\F.F(she)}
---------------------------------------------------------------------->
((S\NP)\((S\NP)/NP)) {\R x.(exists z.book(z) & R(z,x))}
----------------------------------------------------------------------------------------------------<
(S\NP) {\x.(exists z.book(z) & have(x,z))}
-------------------------------------------------------------------------------------------------------------->
S {(exists z.book(z) & have(she,z))}

Using conjunctions
---------------------

# TODO: The semantics of "and" should be more flexible
>>> lex = lexicon.fromstring('''
... :- S, NP, N
... I => NP {I}
... cook => (S\\NP)/NP {\\x y.cook(x,y)}
... and => var\\.,var/.,var {\\P Q x y.(P(x,y) & Q(x,y))}
... eat => (S\\NP)/NP {\\x y.eat(x,y)}
... the => NP/N {\\x.the(x)}
... bacon => N {bacon}
... ''',
... True)

>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
>>> parses = list(parser.parse("I cook and eat the bacon".split()))
>>> print(str(len(parses)) + " parses")
7 parses

>>> printCCGDerivation(parses[0])
I cook and eat the bacon
NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
------------------------------------------------------------------------------------->
(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
-------------------------------------------------------------------------------------------------------------------<
((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
------------------------------->
NP {the(bacon)}
-------------------------------------------------------------------------------------------------------------------------------------------------->
(S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
----------------------------------------------------------------------------------------------------------------------------------------------------------<
S {(eat(the(bacon),I) & cook(the(bacon),I))}

>>> printCCGDerivation(parses[1])
I cook and eat the bacon
NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
------------------------------------------------------------------------------------->
(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
-------------------------------------------------------------------------------------------------------------------<
((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
--------------------------------------------------------------------------------------------------------------------------------------->B
((S\NP)/N) {\x y.(eat(the(x),y) & cook(the(x),y))}
-------------------------------------------------------------------------------------------------------------------------------------------------->
(S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
----------------------------------------------------------------------------------------------------------------------------------------------------------<
S {(eat(the(bacon),I) & cook(the(bacon),I))}

>>> printCCGDerivation(parses[2])
I cook and eat the bacon
NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
-------->T
(S/(S\NP)) {\F.F(I)}
------------------------------------------------------------------------------------->
(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
-------------------------------------------------------------------------------------------------------------------<
((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
------------------------------->
NP {the(bacon)}
-------------------------------------------------------------------------------------------------------------------------------------------------->
(S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
---------------------------------------------------------------------------------------------------------------------------------------------------------->
S {(eat(the(bacon),I) & cook(the(bacon),I))}

>>> printCCGDerivation(parses[3])
I cook and eat the bacon
NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
-------->T
(S/(S\NP)) {\F.F(I)}
------------------------------------------------------------------------------------->
(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
-------------------------------------------------------------------------------------------------------------------<
((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
--------------------------------------------------------------------------------------------------------------------------------------->B
((S\NP)/N) {\x y.(eat(the(x),y) & cook(the(x),y))}
-------------------------------------------------------------------------------------------------------------------------------------------------->
(S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
---------------------------------------------------------------------------------------------------------------------------------------------------------->
S {(eat(the(bacon),I) & cook(the(bacon),I))}

>>> printCCGDerivation(parses[4])
I cook and eat the bacon
NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
-------->T
(S/(S\NP)) {\F.F(I)}
------------------------------------------------------------------------------------->
(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
-------------------------------------------------------------------------------------------------------------------<
((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
--------------------------------------------------------------------------------------------------------------------------->B
(S/NP) {\x.(eat(x,I) & cook(x,I))}
------------------------------->
NP {the(bacon)}
---------------------------------------------------------------------------------------------------------------------------------------------------------->
S {(eat(the(bacon),I) & cook(the(bacon),I))}

>>> printCCGDerivation(parses[5])
I cook and eat the bacon
NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
-------->T
(S/(S\NP)) {\F.F(I)}
------------------------------------------------------------------------------------->
(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
-------------------------------------------------------------------------------------------------------------------<
((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
--------------------------------------------------------------------------------------------------------------------------------------->B
((S\NP)/N) {\x y.(eat(the(x),y) & cook(the(x),y))}
----------------------------------------------------------------------------------------------------------------------------------------------->B
(S/N) {\x.(eat(the(x),I) & cook(the(x),I))}
---------------------------------------------------------------------------------------------------------------------------------------------------------->
S {(eat(the(bacon),I) & cook(the(bacon),I))}

>>> printCCGDerivation(parses[6])
I cook and eat the bacon
NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
-------->T
(S/(S\NP)) {\F.F(I)}
------------------------------------------------------------------------------------->
(((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
-------------------------------------------------------------------------------------------------------------------<
((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
--------------------------------------------------------------------------------------------------------------------------->B
(S/NP) {\x.(eat(x,I) & cook(x,I))}
----------------------------------------------------------------------------------------------------------------------------------------------->B
(S/N) {\x.(eat(the(x),I) & cook(the(x),I))}
---------------------------------------------------------------------------------------------------------------------------------------------------------->
S {(eat(the(bacon),I) & cook(the(bacon),I))}

Tests from published papers
------------------------------

An example from "CCGbank: A Corpus of CCG Derivations and Dependency Structures Extracted from the Penn Treebank", Hockenmaier and Steedman, 2007, Page 359, https://www.aclweb.org/anthology/J/J07/J07-3004.pdf

>>> lex = lexicon.fromstring('''
... :- S, NP
... I => NP {I}
... give => ((S\\NP)/NP)/NP {\\x y z.give(y,x,z)}
... them => NP {them}
... money => NP {money}
... ''',
... True)

>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
>>> parses = list(parser.parse("I give them money".split()))
>>> print(str(len(parses)) + " parses")
3 parses

>>> printCCGDerivation(parses[0])
I give them money
NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them} NP {money}
-------------------------------------------------->
((S\NP)/NP) {\y z.give(y,them,z)}
-------------------------------------------------------------->
(S\NP) {\z.give(money,them,z)}
----------------------------------------------------------------------<
S {give(money,them,I)}

>>> printCCGDerivation(parses[1])
I give them money
NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them} NP {money}
-------->T
(S/(S\NP)) {\F.F(I)}
-------------------------------------------------->
((S\NP)/NP) {\y z.give(y,them,z)}
-------------------------------------------------------------->
(S\NP) {\z.give(money,them,z)}
---------------------------------------------------------------------->
S {give(money,them,I)}


>>> printCCGDerivation(parses[2])
I give them money
NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them} NP {money}
-------->T
(S/(S\NP)) {\F.F(I)}
-------------------------------------------------->
((S\NP)/NP) {\y z.give(y,them,z)}
---------------------------------------------------------->B
(S/NP) {\y.give(y,them,I)}
---------------------------------------------------------------------->
S {give(money,them,I)}


An example from "CCGbank: A Corpus of CCG Derivations and Dependency Structures Extracted from the Penn Treebank", Hockenmaier and Steedman, 2007, Page 359, https://www.aclweb.org/anthology/J/J07/J07-3004.pdf

>>> lex = lexicon.fromstring('''
... :- N, NP, S
... money => N {money}
... that => (N\\N)/(S/NP) {\\P Q x.(P(x) & Q(x))}
... I => NP {I}
... give => ((S\\NP)/NP)/NP {\\x y z.give(y,x,z)}
... them => NP {them}
... ''',
... True)

>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
>>> parses = list(parser.parse("money that I give them".split()))
>>> print(str(len(parses)) + " parses")
3 parses

>>> printCCGDerivation(parses[0])
money that I give them
N {money} ((N\N)/(S/NP)) {\P Q x.(P(x) & Q(x))} NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them}
-------->T
(S/(S\NP)) {\F.F(I)}
-------------------------------------------------->
((S\NP)/NP) {\y z.give(y,them,z)}
---------------------------------------------------------->B
(S/NP) {\y.give(y,them,I)}
------------------------------------------------------------------------------------------------->
(N\N) {\Q x.(give(x,them,I) & Q(x))}
------------------------------------------------------------------------------------------------------------<
N {\x.(give(x,them,I) & money(x))}

>>> printCCGDerivation(parses[1])
money that I give them
N {money} ((N\N)/(S/NP)) {\P Q x.(P(x) & Q(x))} NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them}
----------->T
(N/(N\N)) {\F.F(money)}
-------->T
(S/(S\NP)) {\F.F(I)}
-------------------------------------------------->
((S\NP)/NP) {\y z.give(y,them,z)}
---------------------------------------------------------->B
(S/NP) {\y.give(y,them,I)}
------------------------------------------------------------------------------------------------->
(N\N) {\Q x.(give(x,them,I) & Q(x))}
------------------------------------------------------------------------------------------------------------>
N {\x.(give(x,them,I) & money(x))}

>>> printCCGDerivation(parses[2])
money that I give them
N {money} ((N\N)/(S/NP)) {\P Q x.(P(x) & Q(x))} NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them}
----------->T
(N/(N\N)) {\F.F(money)}
-------------------------------------------------->B
(N/(S/NP)) {\P x.(P(x) & money(x))}
-------->T
(S/(S\NP)) {\F.F(I)}
-------------------------------------------------->
((S\NP)/NP) {\y z.give(y,them,z)}
---------------------------------------------------------->B
(S/NP) {\y.give(y,them,I)}
------------------------------------------------------------------------------------------------------------>
N {\x.(give(x,them,I) & money(x))}


-------
Lexicon
-------

>>> from nltk.ccg import lexicon

Parse lexicon with semantics

>>> print(str(lexicon.fromstring(
... '''
... :- S,NP
...
... IntransVsg :: S\\NP[sg]
...
... sleeps => IntransVsg {\\x.sleep(x)}
... eats => S\\NP[sg]/NP {\\x y.eat(x,y)}
...
... and => var\\var/var {\\x y.x & y}
... ''',
... True
... )))
and => ((_var0\_var0)/_var0) {(\x y.x & y)}
eats => ((S\NP['sg'])/NP) {\x y.eat(x,y)}
sleeps => (S\NP['sg']) {\x.sleep(x)}

Parse lexicon without semantics

>>> print(str(lexicon.fromstring(
... '''
... :- S,NP
...
... IntransVsg :: S\\NP[sg]
...
... sleeps => IntransVsg
... eats => S\\NP[sg]/NP {sem=\\x y.eat(x,y)}
...
... and => var\\var/var
... ''',
... False
... )))
and => ((_var0\_var0)/_var0)
eats => ((S\NP['sg'])/NP)
sleeps => (S\NP['sg'])

Semantics are missing

>>> print(str(lexicon.fromstring(
... '''
... :- S,NP
...
... eats => S\\NP[sg]/NP
... ''',
... True
... )))
Traceback (most recent call last):
...
AssertionError: eats => S\NP[sg]/NP must contain semantics because include_semantics is set to True


------------------------------------
CCG combinator semantics computation
------------------------------------

>>> from nltk.sem.logic import *
>>> from nltk.ccg.logic import *

>>> read_expr = Expression.fromstring

Compute semantics from function application

>>> print(str(compute_function_semantics(read_expr(r'\x.P(x)'), read_expr(r'book'))))
P(book)

>>> print(str(compute_function_semantics(read_expr(r'\P.P(book)'), read_expr(r'read'))))
read(book)

>>> print(str(compute_function_semantics(read_expr(r'\P.P(book)'), read_expr(r'\x.read(x)'))))
read(book)

Compute semantics from composition

>>> print(str(compute_composition_semantics(read_expr(r'\x.P(x)'), read_expr(r'\x.Q(x)'))))
\x.P(Q(x))

>>> print(str(compute_composition_semantics(read_expr(r'\x.P(x)'), read_expr(r'read'))))
Traceback (most recent call last):
...
AssertionError: `read` must be a lambda expression

Compute semantics from substitution

>>> print(str(compute_substitution_semantics(read_expr(r'\x y.P(x,y)'), read_expr(r'\x.Q(x)'))))
\x.P(x,Q(x))

>>> print(str(compute_substitution_semantics(read_expr(r'\x.P(x)'), read_expr(r'read'))))
Traceback (most recent call last):
...
AssertionError: `\x.P(x)` must be a lambda expression with 2 arguments

Compute type-raise semantics

>>> print(str(compute_type_raised_semantics(read_expr(r'\x.P(x)'))))
\F x.F(P(x))

>>> print(str(compute_type_raised_semantics(read_expr(r'\x.F(x)'))))
\F1 x.F1(F(x))

>>> print(str(compute_type_raised_semantics(read_expr(r'\x y z.P(x,y,z)'))))
\F x y z.F(P(x,y,z))
venv/lib/python3.10/site-packages/nltk/test/chat80.doctest ADDED
@@ -0,0 +1,232 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

=======
Chat-80
=======

Chat-80 was a natural language system which allowed the user to
interrogate a Prolog knowledge base in the domain of world
geography. It was developed in the early '80s by Warren and Pereira; see
`<https://aclanthology.org/J82-3002.pdf>`_ for a description and
`<http://www.cis.upenn.edu/~pereira/oldies.html>`_ for the source
files.

The ``chat80`` module contains functions to extract data from the Chat-80
relation files ('the world database'), and convert them into a format
that can be incorporated in the FOL models of
``nltk.sem.evaluate``. The code assumes that the Prolog
input files are available in the NLTK corpora directory.

The Chat-80 World Database consists of the following files::

    world0.pl
    rivers.pl
    cities.pl
    countries.pl
    contain.pl
    borders.pl

This module uses a slightly modified version of ``world0.pl``, in which
a set of Prolog rules has been omitted. The modified file is named
``world1.pl``. Currently, the file ``rivers.pl`` is not read in, since
it uses a list rather than a string in the second field.

Reading Chat-80 Files
=====================

Chat-80 relations are like tables in a relational database. The
relation acts as the name of the table; the first argument acts as the
'primary key'; and subsequent arguments are further fields in the
table. In general, the name of the table provides a label for a unary
predicate whose extension is all the primary keys. For example,
relations in ``cities.pl`` are of the following form::

    'city(athens,greece,1368).'

Here, ``'athens'`` is the key, and will be mapped to a member of the
unary predicate *city*.

By analogy with NLTK corpora, ``chat80`` defines a number of 'items'
which correspond to the relations.

>>> from nltk.sem import chat80
>>> print(chat80.items)
('borders', 'circle_of_lat', 'circle_of_long', 'city', ...)

The fields in the table are mapped to binary predicates. The first
argument of the predicate is the primary key, while the second
argument is the data in the relevant field. Thus, in the above
example, the third field is mapped to the binary predicate
*population_of*, whose extension is a set of pairs such as
``'(athens, 1368)'``.

An exception to this general framework is required by the relations in
the files ``borders.pl`` and ``contains.pl``. These contain facts of the
following form::

    'borders(albania,greece).'

    'contains0(africa,central_africa).'

We do not want to form a unary concept out of the element in
the first field of these records, and we want the label of the binary
relation just to be ``'border'``/``'contain'`` respectively.

In order to drive the extraction process, we use 'relation metadata bundles'
which are Python dictionaries such as the following::

    city = {'label': 'city',
            'closures': [],
            'schema': ['city', 'country', 'population'],
            'filename': 'cities.pl'}

According to this, the file ``city['filename']`` contains a list of
relational tuples (or more accurately, the corresponding strings in
Prolog form) whose predicate symbol is ``city['label']`` and whose
relational schema is ``city['schema']``. The notion of a ``closure`` is
discussed in the next section.

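As an illustration of how a bundle drives extraction, the following
sketch (not part of the original test suite) feeds the ``city`` bundle
above through a ``process_bundle`` helper; the helper name and the keys
of its result are assumptions based on the prose, so the example is
skipped rather than executed.

>>> city = {'label': 'city',
...         'closures': [],
...         'schema': ['city', 'country', 'population'],
...         'filename': 'cities.pl'}
>>> concept_map = chat80.process_bundle([city])  # doctest: +SKIP
>>> sorted(concept_map.keys())                   # doctest: +SKIP
['city', 'country_of', 'population_of']
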
Concepts
========
In order to encapsulate the results of the extraction, a class of
``Concept``\ s is introduced. A ``Concept`` object has a number of
attributes, in particular a ``prefLabel``, an ``arity``, and an ``extension``.

>>> c1 = chat80.Concept('dog', arity=1, extension=set(['d1', 'd2']))
>>> print(c1)
Label = 'dog'
Arity = 1
Extension = ['d1', 'd2']

The ``extension`` attribute makes it easier to inspect the output of
the extraction.

>>> schema = ['city', 'country', 'population']
>>> concepts = chat80.clause2concepts('cities.pl', 'city', schema)
>>> concepts
[Concept('city'), Concept('country_of'), Concept('population_of')]
>>> for c in concepts:
...     print("%s:\n\t%s" % (c.prefLabel, c.extension[:4]))
city:
['athens', 'bangkok', 'barcelona', 'berlin']
country_of:
[('athens', 'greece'), ('bangkok', 'thailand'), ('barcelona', 'spain'), ('berlin', 'east_germany')]
population_of:
[('athens', '1368'), ('bangkok', '1178'), ('barcelona', '1280'), ('berlin', '3481')]

In addition, the ``extension`` can be further
processed: in the case of the ``'border'`` relation, we check that the
relation is **symmetric**, and in the case of the ``'contain'``
relation, we carry out the **transitive closure**. The closure
properties associated with a concept are indicated in the relation
metadata, as noted earlier.

>>> borders = set([('a1', 'a2'), ('a2', 'a3')])
>>> c2 = chat80.Concept('borders', arity=2, extension=borders)
>>> print(c2)
Label = 'borders'
Arity = 2
Extension = [('a1', 'a2'), ('a2', 'a3')]
>>> c3 = chat80.Concept('borders', arity=2, closures=['symmetric'], extension=borders)
>>> c3.close()
>>> print(c3)
Label = 'borders'
Arity = 2
Extension = [('a1', 'a2'), ('a2', 'a1'), ('a2', 'a3'), ('a3', 'a2')]

The ``extension`` of a ``Concept`` object is then incorporated into a
``Valuation`` object.

Persistence
===========
The functions ``val_dump`` and ``val_load`` are provided to allow a
valuation to be stored in a persistent database and re-loaded, rather
than having to be re-computed each time.

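A minimal sketch of the intended round trip follows. It assumes the
``val_dump(rels, db)`` and ``val_load(db)`` signatures suggested by the
prose, with ``chat80.rels`` as the module's list of relation metadata
bundles and ``'chat80_db'`` as an arbitrary target filename, so the
example is skipped rather than executed.

>>> chat80.val_dump(chat80.rels, 'chat80_db')   # doctest: +SKIP
>>> val = chat80.val_load('chat80_db')          # doctest: +SKIP
>>> 'calcutta' in val['city']                   # doctest: +SKIP
True
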
Individuals and Lexical Items
=============================
As well as deriving relations from the Chat-80 data, we also create a
set of individual constants, one for each entity in the domain. The
individual constants are string-identical to the entities. For
example, given a data item such as ``'zloty'``, we add to the valuation
a pair ``('zloty', 'zloty')``. In order to parse English sentences that
refer to these entities, we also create a lexical item such as the
following for each individual constant::

    PropN[num=sg, sem=<\P.(P zloty)>] -> 'Zloty'

The set of rules is written to the file ``chat_pnames.fcfg`` in the
current directory.

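A sketch of how these two steps might be driven together; the
``label_indivs(valuation, lexicon=...)`` helper, its return value, and
the ``'chat80_db'`` filename are assumptions based on the prose above,
so the example is skipped rather than executed.

>>> val = chat80.val_load('chat80_db')             # doctest: +SKIP
>>> val = chat80.label_indivs(val, lexicon=True)   # doctest: +SKIP
>>> val['zloty']                                   # doctest: +SKIP
'zloty'
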
SQL Query
=========

The ``city`` relation is also available in RDB form and can be queried
using SQL statements.

>>> import nltk
>>> q = "SELECT City, Population FROM city_table WHERE Country = 'china' and Population > 1000"
>>> for answer in chat80.sql_query('corpora/city_database/city.db', q):
...     print("%-10s %4s" % answer)
canton     1496
chungking  1100
mukden     1551
peking     2031
shanghai   5407
tientsin   1795

The (deliberately naive) grammar ``sql0.fcfg`` translates from English
to SQL:

>>> nltk.data.show_cfg('grammars/book_grammars/sql0.fcfg')
% start S
S[SEM=(?np + WHERE + ?vp)] -> NP[SEM=?np] VP[SEM=?vp]
VP[SEM=(?v + ?pp)] -> IV[SEM=?v] PP[SEM=?pp]
VP[SEM=(?v + ?ap)] -> IV[SEM=?v] AP[SEM=?ap]
NP[SEM=(?det + ?n)] -> Det[SEM=?det] N[SEM=?n]
PP[SEM=(?p + ?np)] -> P[SEM=?p] NP[SEM=?np]
AP[SEM=?pp] -> A[SEM=?a] PP[SEM=?pp]
NP[SEM='Country="greece"'] -> 'Greece'
NP[SEM='Country="china"'] -> 'China'
Det[SEM='SELECT'] -> 'Which' | 'What'
N[SEM='City FROM city_table'] -> 'cities'
IV[SEM=''] -> 'are'
A[SEM=''] -> 'located'
P[SEM=''] -> 'in'

Given this grammar, we can express, and then execute, queries in English.

>>> cp = nltk.parse.load_parser('grammars/book_grammars/sql0.fcfg')
>>> query = 'What cities are in China'
>>> for tree in cp.parse(query.split()):
...     answer = tree.label()['SEM']
...     q = " ".join(answer)
...     print(q)
...
SELECT City FROM city_table WHERE Country="china"

>>> rows = chat80.sql_query('corpora/city_database/city.db', q)
>>> for r in rows: print("%s" % r, end=' ')
canton chungking dairen harbin kowloon mukden peking shanghai sian tientsin


Using Valuations
-----------------

In order to convert such an extension into a valuation, we use the
``make_valuation()`` method; setting ``read=True`` creates and returns
a new ``Valuation`` object which contains the results.

>>> val = chat80.make_valuation(concepts, read=True)
>>> 'calcutta' in val['city']
True
>>> [town for (town, country) in val['country_of'] if country == 'india']
['bombay', 'calcutta', 'delhi', 'hyderabad', 'madras']
>>> dom = val.domain
>>> g = nltk.sem.Assignment(dom)
>>> m = nltk.sem.Model(dom, val)
>>> m.evaluate(r'population_of(jakarta, 533)', g)
True
venv/lib/python3.10/site-packages/nltk/test/childes.doctest ADDED
@@ -0,0 +1,190 @@
=======================
CHILDES Corpus Readers
=======================

Read the XML version of the CHILDES corpus.

Setup
=====

>>> from nltk.test.childes_fixt import setup_module
>>> setup_module()

How to use CHILDESCorpusReader
==============================

Import the CHILDESCorpusReader class and read the CHILDES corpus saved in
the nltk_data directory.

>>> import nltk
>>> from nltk.corpus.reader import CHILDESCorpusReader
>>> corpus_root = nltk.data.find('corpora/childes/data-xml/Eng-USA-MOR/')

Reading files in the Valian corpus (Valian, 1991).

>>> valian = CHILDESCorpusReader(corpus_root, 'Valian/.*.xml')
>>> valian.fileids()
['Valian/01a.xml', 'Valian/01b.xml', 'Valian/02a.xml', 'Valian/02b.xml',...

Count the number of files.

>>> len(valian.fileids())
43

Printing properties of the corpus files.

>>> corpus_data = valian.corpus(valian.fileids())
>>> print(corpus_data[0]['Lang'])
eng
>>> for key in sorted(corpus_data[0].keys()):
...     print(key, ": ", corpus_data[0][key])
Corpus : valian
Date : 1986-03-04
Id : 01a
Lang : eng
Version : 2.0.1
{http://www.w3.org/2001/XMLSchema-instance}schemaLocation : http://www.talkbank.org/ns/talkbank http://talkbank.org/software/talkbank.xsd

Printing information of participants of the corpus. The most common codes for
the participants are 'CHI' (target child), 'MOT' (mother), and 'INV' (investigator).

>>> corpus_participants = valian.participants(valian.fileids())
>>> for this_corpus_participants in corpus_participants[:2]:
...     for key in sorted(this_corpus_participants.keys()):
...         dct = this_corpus_participants[key]
...         print(key, ": ", [(k, dct[k]) for k in sorted(dct.keys())])
CHI : [('age', 'P2Y1M3D'), ('group', 'normal'), ('id', 'CHI'), ('language', 'eng'), ('role', 'Target_Child'), ('sex', 'female')]
INV : [('id', 'INV'), ('language', 'eng'), ('role', 'Investigator')]
MOT : [('id', 'MOT'), ('language', 'eng'), ('role', 'Mother')]
CHI : [('age', 'P2Y1M12D'), ('group', 'normal'), ('id', 'CHI'), ('language', 'eng'), ('role', 'Target_Child'), ('sex', 'female')]
INV : [('id', 'INV'), ('language', 'eng'), ('role', 'Investigator')]
MOT : [('id', 'MOT'), ('language', 'eng'), ('role', 'Mother')]

Printing words.

>>> valian.words('Valian/01a.xml')
['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname', ...

Printing sentences.

>>> valian.sents('Valian/01a.xml')
[['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname',
'and', 'it', 'is', 'March', 'fourth', 'I', 'believe', 'and', 'when',
'was', "Parent's", 'birthday'], ["Child's"], ['oh', "I'm", 'sorry'],
["that's", 'okay'], ...

You can specify the participants with the argument *speaker*.

>>> valian.words('Valian/01a.xml',speaker=['INV'])
['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname', ...
>>> valian.words('Valian/01a.xml',speaker=['MOT'])
["Child's", "that's", 'okay', 'February', 'first', 'nineteen', ...
>>> valian.words('Valian/01a.xml',speaker=['CHI'])
['tape', 'it', 'up', 'and', 'two', 'tape', 'players', 'have',...


tagged_words() and tagged_sents() return the usual (word,pos) tuple lists.
POS tags in the CHILDES are automatically assigned by MOR and POST programs
(MacWhinney, 2000).

>>> valian.tagged_words('Valian/01a.xml')[:30]
[('at', 'prep'), ('Parent', 'n:prop'), ("Lastname's", 'n:prop'), ('house', 'n'),
('with', 'prep'), ('Child', 'n:prop'), ('Lastname', 'n:prop'), ('and', 'coord'),
('it', 'pro'), ('is', 'v:cop'), ('March', 'n:prop'), ('fourth', 'adj'),
('I', 'pro:sub'), ('believe', 'v'), ('and', 'coord'), ('when', 'adv:wh'),
('was', 'v:cop'), ("Parent's", 'n:prop'), ('birthday', 'n'), ("Child's", 'n:prop'),
('oh', 'co'), ("I'm", 'pro:sub'), ('sorry', 'adj'), ("that's", 'pro:dem'),
('okay', 'adj'), ('February', 'n:prop'), ('first', 'adj'),
('nineteen', 'det:num'), ('eighty', 'det:num'), ('four', 'det:num')]

>>> valian.tagged_sents('Valian/01a.xml')[:10]
[[('at', 'prep'), ('Parent', 'n:prop'), ("Lastname's", 'n:prop'), ('house', 'n'),
('with', 'prep'), ('Child', 'n:prop'), ('Lastname', 'n:prop'), ('and', 'coord'),
('it', 'pro'), ('is', 'v:cop'), ('March', 'n:prop'), ('fourth', 'adj'),
('I', 'pro:sub'), ('believe', 'v'), ('and', 'coord'), ('when', 'adv:wh'),
('was', 'v:cop'), ("Parent's", 'n:prop'), ('birthday', 'n')],
[("Child's", 'n:prop')], [('oh', 'co'), ("I'm", 'pro:sub'), ('sorry', 'adj')],
[("that's", 'pro:dem'), ('okay', 'adj')],
[('February', 'n:prop'), ('first', 'adj'), ('nineteen', 'det:num'),
('eighty', 'det:num'), ('four', 'det:num')],
[('great', 'adj')],
[('and', 'coord'), ("she's", 'pro:sub'), ('two', 'det:num'), ('years', 'n'), ('old', 'adj')],
[('correct', 'adj')],
[('okay', 'co')], [('she', 'pro:sub'), ('just', 'adv:int'), ('turned', 'part'), ('two', 'det:num'),
('a', 'det'), ('month', 'n'), ('ago', 'adv')]]

When the argument *stem* is true, the word stems (e.g., 'is' -> 'be-3PS') are
used instead of the original words.

>>> valian.words('Valian/01a.xml')[:30]
['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname', 'and', 'it', 'is', ...
>>> valian.words('Valian/01a.xml',stem=True)[:30]
['at', 'Parent', 'Lastname', 's', 'house', 'with', 'Child', 'Lastname', 'and', 'it', 'be-3S', ...

When the argument *replace* is true, the replaced words are used instead of
the original words.

>>> valian.words('Valian/01a.xml',speaker='CHI')[247]
'tikteat'
>>> valian.words('Valian/01a.xml',speaker='CHI',replace=True)[247]
'trick'

When the argument *relation* is true, the relational structure of each
sentence is returned. See Sagae et al. (2010) for details of the relational
structure adopted in the CHILDES.

>>> valian.words('Valian/01a.xml',relation=True)[:10]
[[('at', 'prep', '1|0|ROOT'), ('Parent', 'n', '2|5|VOC'), ('Lastname', 'n', '3|5|MOD'), ('s', 'poss', '4|5|MOD'), ('house', 'n', '5|1|POBJ'), ('with', 'prep', '6|1|JCT'), ('Child', 'n', '7|8|NAME'), ('Lastname', 'n', '8|6|POBJ'), ('and', 'coord', '9|8|COORD'), ('it', 'pro', '10|11|SUBJ'), ('be-3S', 'v', '11|9|COMP'), ('March', 'n', '12|11|PRED'), ('fourth', 'adj', '13|12|MOD'), ('I', 'pro', '15|16|SUBJ'), ('believe', 'v', '16|14|ROOT'), ('and', 'coord', '18|17|ROOT'), ('when', 'adv', '19|20|PRED'), ('be-PAST', 'v', '20|18|COMP'), ('Parent', 'n', '21|23|MOD'), ('s', 'poss', '22|23|MOD'), ('birth', 'n', '23|20|SUBJ')], [('Child', 'n', '1|2|MOD'), ('s', 'poss', '2|0|ROOT')], [('oh', 'co', '1|4|COM'), ('I', 'pro', '3|4|SUBJ'), ('be', 'v', '4|0|ROOT'), ('sorry', 'adj', '5|4|PRED')], [('that', 'pro', '1|2|SUBJ'), ('be', 'v', '2|0|ROOT'), ('okay', 'adj', '3|2|PRED')], [('February', 'n', '1|6|VOC'), ('first', 'adj', '2|6|ENUM'), ('nineteen', 'det', '4|6|ENUM'), ('eighty', 'det', '5|6|ENUM'), ('four', 'det', '6|0|ROOT')], [('great', 'adj', '1|0|ROOT')], [('and', 'coord', '1|0|ROOT'), ('she', 'pro', '2|1|ROOT'), ('be', 'aux', '3|5|AUX'), ('two', 'det', '4|5|QUANT'), ('year-PL', 'n', '5|2|ROOT'), ('old', 'adj', '6|5|MOD')], [('correct', 'adj', '1|0|ROOT')], [('okay', 'co', '1|0|ROOT')], [('she', 'pro', '1|0|ROOT'), ('just', 'adv', '2|3|JCT'), ('turn-PERF', 'part', '3|1|XCOMP'), ('two', 'det', '4|6|QUANT'), ('a', 'det', '5|6|DET'), ('month', 'n', '6|3|OBJ'), ('ago', 'adv', '7|3|JCT')]]

Printing age. When the argument *month* is true, the age information in
the CHILDES format is converted into the number of months.

>>> valian.age()
['P2Y1M3D', 'P2Y1M12D', 'P1Y9M21D', 'P1Y9M28D', 'P2Y1M23D', ...
>>> valian.age('Valian/01a.xml')
['P2Y1M3D']
>>> valian.age('Valian/01a.xml',month=True)
[25]

Printing MLU. The criteria for the MLU computation are broadly based on
Brown (1973).

>>> valian.MLU()
[2.3574660633484..., 2.292682926829..., 3.492857142857..., 2.961783439490...,
2.0842696629213..., 3.169811320754..., 3.137404580152..., 3.0578034682080...,
4.090163934426..., 3.488372093023..., 2.8773584905660..., 3.4792899408284...,
4.0111940298507..., 3.456790123456..., 4.487603305785..., 4.007936507936...,
5.25, 5.154696132596..., ...]

>>> valian.MLU('Valian/01a.xml')
[2.35746606334...]


Basic stuff
==============================

Count the number of words and sentences of each file.

>>> valian = CHILDESCorpusReader(corpus_root, 'Valian/.*.xml')
>>> for this_file in valian.fileids()[:6]:
...     print(valian.corpus(this_file)[0]['Corpus'], valian.corpus(this_file)[0]['Id'])
...     print("num of words: %i" % len(valian.words(this_file)))
...     print("num of sents: %i" % len(valian.sents(this_file)))
valian 01a
num of words: 3606
num of sents: 1027
valian 01b
num of words: 4376
num of sents: 1274
valian 02a
num of words: 2673
num of sents: 801
valian 02b
num of words: 5020
num of sents: 1583
valian 03a
num of words: 2743
num of sents: 988
valian 03b
num of words: 4409
num of sents: 1397
venv/lib/python3.10/site-packages/nltk/test/childes_fixt.py ADDED
@@ -0,0 +1,13 @@
def setup_module():
    import pytest

    import nltk.data

    try:
        nltk.data.find("corpora/childes/data-xml/Eng-USA-MOR/")
    except LookupError as e:
        pytest.skip(
            "The CHILDES corpus is not found. "
            "It should be manually downloaded and saved/unpacked "
            "to [NLTK_Data_Dir]/corpora/childes/"
        )
venv/lib/python3.10/site-packages/nltk/test/chunk.doctest ADDED
@@ -0,0 +1,372 @@
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
==========
|
5 |
+
Chunking
|
6 |
+
==========
|
7 |
+
|
8 |
+
>>> from nltk.chunk import *
|
9 |
+
>>> from nltk.chunk.util import *
|
10 |
+
>>> from nltk.chunk.regexp import *
|
11 |
+
>>> from nltk import Tree
|
12 |
+
|
13 |
+
>>> tagged_text = "[ The/DT cat/NN ] sat/VBD on/IN [ the/DT mat/NN ] [ the/DT dog/NN ] chewed/VBD ./."
|
14 |
+
>>> gold_chunked_text = tagstr2tree(tagged_text)
|
15 |
+
>>> unchunked_text = gold_chunked_text.flatten()
|
16 |
+
|
17 |
+
Chunking uses a special regexp syntax for rules that delimit the chunks. These
|
18 |
+
rules must be converted to 'regular' regular expressions before a sentence can
|
19 |
+
be chunked.
|
20 |
+
|
21 |
+
>>> tag_pattern = "<DT>?<JJ>*<NN.*>"
|
22 |
+
>>> regexp_pattern = tag_pattern2re_pattern(tag_pattern)
|
23 |
+
>>> regexp_pattern
|
24 |
+
'(<(DT)>)?(<(JJ)>)*(<(NN[^\\{\\}<>]*)>)'
|
25 |
+
|
26 |
+
Construct some new chunking rules.
|
27 |
+
|
28 |
+
>>> chunk_rule = ChunkRule(r"<.*>+", "Chunk everything")
|
29 |
+
>>> strip_rule = StripRule(r"<VBD|IN|\.>", "Strip on verbs/prepositions")
|
30 |
+
>>> split_rule = SplitRule("<DT><NN>", "<DT><NN>",
|
31 |
+
... "Split successive determiner/noun pairs")
|
32 |
+
|
33 |
+
|
34 |
+
Create and score a series of chunk parsers, successively more complex.
|
35 |
+
|
36 |
+
>>> chunk_parser = RegexpChunkParser([chunk_rule], chunk_label='NP')
|
37 |
+
>>> chunked_text = chunk_parser.parse(unchunked_text)
|
38 |
+
>>> print(chunked_text)
|
39 |
+
(S
|
40 |
+
(NP
|
41 |
+
The/DT
|
42 |
+
cat/NN
|
43 |
+
sat/VBD
|
44 |
+
on/IN
|
45 |
+
the/DT
|
46 |
+
mat/NN
|
47 |
+
the/DT
|
48 |
+
dog/NN
|
49 |
+
chewed/VBD
|
50 |
+
./.))
|
51 |
+
|
52 |
+
>>> chunkscore = ChunkScore()
|
53 |
+
>>> chunkscore.score(gold_chunked_text, chunked_text)
|
54 |
+
>>> print(chunkscore.precision())
|
55 |
+
0.0
|
56 |
+
|
57 |
+
>>> print(chunkscore.recall())
|
58 |
+
0.0
|
59 |
+
|
60 |
+
>>> print(chunkscore.f_measure())
|
61 |
+
0
|
62 |
+
|
63 |
+
>>> for chunk in sorted(chunkscore.missed()): print(chunk)
|
64 |
+
(NP The/DT cat/NN)
|
65 |
+
(NP the/DT dog/NN)
|
66 |
+
(NP the/DT mat/NN)
|
67 |
+
|
68 |
+
>>> for chunk in chunkscore.incorrect(): print(chunk)
|
69 |
+
(NP
|
70 |
+
The/DT
|
71 |
+
cat/NN
|
72 |
+
sat/VBD
|
73 |
+
on/IN
|
74 |
+
the/DT
|
75 |
+
mat/NN
|
76 |
+
the/DT
|
77 |
+
dog/NN
|
78 |
+
chewed/VBD
|
79 |
+
./.)
|
80 |
+
|
81 |
+
>>> chunk_parser = RegexpChunkParser([chunk_rule, strip_rule],
|
82 |
+
... chunk_label='NP')
|
83 |
+
>>> chunked_text = chunk_parser.parse(unchunked_text)
|
84 |
+
>>> print(chunked_text)
|
85 |
+
(S
|
86 |
+
(NP The/DT cat/NN)
|
87 |
+
sat/VBD
|
88 |
+
on/IN
|
89 |
+
(NP the/DT mat/NN the/DT dog/NN)
|
90 |
+
chewed/VBD
|
91 |
+
./.)
|
92 |
+
>>> assert chunked_text == chunk_parser.parse(list(unchunked_text))
|
93 |
+
|
94 |
+
>>> chunkscore = ChunkScore()
|
95 |
+
>>> chunkscore.score(gold_chunked_text, chunked_text)
|
96 |
+
>>> chunkscore.precision()
|
97 |
+
0.5
|
98 |
+
|
99 |
+
>>> print(chunkscore.recall())
|
100 |
+
0.33333333...
|
101 |
+
|
102 |
+
>>> print(chunkscore.f_measure())
|
103 |
+
0.4
|
104 |
+
|
105 |
+
>>> for chunk in sorted(chunkscore.missed()): print(chunk)
|
106 |
+
(NP the/DT dog/NN)
|
107 |
+
(NP the/DT mat/NN)
|
108 |
+
|
109 |
+
>>> for chunk in chunkscore.incorrect(): print(chunk)
|
110 |
+
(NP the/DT mat/NN the/DT dog/NN)
|
111 |
+
|
112 |
+
>>> chunk_parser = RegexpChunkParser([chunk_rule, strip_rule, split_rule],
|
113 |
+
... chunk_label='NP')
|
114 |
+
>>> chunked_text = chunk_parser.parse(unchunked_text, trace=True)
|
115 |
+
# Input:
|
116 |
+
<DT> <NN> <VBD> <IN> <DT> <NN> <DT> <NN> <VBD> <.>
|
117 |
+
# Chunk everything:
|
118 |
+
{<DT> <NN> <VBD> <IN> <DT> <NN> <DT> <NN> <VBD> <.>}
|
119 |
+
# Strip on verbs/prepositions:
|
120 |
+
{<DT> <NN>} <VBD> <IN> {<DT> <NN> <DT> <NN>} <VBD> <.>
|
121 |
+
# Split successive determiner/noun pairs:
|
122 |
+
{<DT> <NN>} <VBD> <IN> {<DT> <NN>}{<DT> <NN>} <VBD> <.>
|
123 |
+
>>> print(chunked_text)
|
124 |
+
(S
|
125 |
+
(NP The/DT cat/NN)
|
126 |
+
sat/VBD
|
127 |
+
on/IN
|
128 |
+
(NP the/DT mat/NN)
|
129 |
+
(NP the/DT dog/NN)
|
130 |
+
chewed/VBD
|
131 |
+
./.)
|
132 |
+
|
133 |
+
>>> chunkscore = ChunkScore()
|
134 |
+
>>> chunkscore.score(gold_chunked_text, chunked_text)
|
135 |
+
>>> chunkscore.precision()
|
136 |
+
1.0
|
137 |
+
|
138 |
+
>>> chunkscore.recall()
|
139 |
+
1.0
|
140 |
+
|
141 |
+
>>> chunkscore.f_measure()
|
142 |
+
1.0
|
143 |
+
|
144 |
+
>>> chunkscore.missed()
|
145 |
+
[]
|
146 |
+
|
147 |
+
>>> chunkscore.incorrect()
|
148 |
+
[]
|
149 |
+
|
150 |
+
>>> chunk_parser.rules()
|
151 |
+
[<ChunkRule: '<.*>+'>, <StripRule: '<VBD|IN|\\.>'>,
|
152 |
+
<SplitRule: '<DT><NN>', '<DT><NN>'>]
|
153 |
+
|
154 |
+
Printing parsers:
|
155 |
+
|
156 |
+
>>> print(repr(chunk_parser))
|
157 |
+
<RegexpChunkParser with 3 rules>
|
158 |
+
>>> print(chunk_parser)
|
159 |
+
RegexpChunkParser with 3 rules:
|
160 |
+
Chunk everything
|
161 |
+
<ChunkRule: '<.*>+'>
|
162 |
+
Strip on verbs/prepositions
|
163 |
+
<StripRule: '<VBD|IN|\\.>'>
|
164 |
+
Split successive determiner/noun pairs
|
165 |
+
<SplitRule: '<DT><NN>', '<DT><NN>'>
|
166 |
+
|
167 |
+
Regression Tests
|
168 |
+
~~~~~~~~~~~~~~~~
|
169 |
+
ChunkParserI
|
170 |
+
------------
|
171 |
+
`ChunkParserI` is an abstract interface -- it is not meant to be
|
172 |
+
instantiated directly.
|
173 |
+
|
174 |
+
>>> ChunkParserI().parse([])
|
175 |
+
Traceback (most recent call last):
|
176 |
+
. . .
|
177 |
+
NotImplementedError
|
178 |
+
|
179 |
+
|
180 |
+
ChunkString
|
181 |
+
-----------
|
182 |
+
ChunkString can be built from a tree of tagged tuples, a tree of
|
183 |
+
trees, or a mixed list of both:
|
184 |
+
|
185 |
+
>>> t1 = Tree('S', [('w%d' % i, 't%d' % i) for i in range(10)])
|
186 |
+
>>> t2 = Tree('S', [Tree('t0', []), Tree('t1', ['c1'])])
|
187 |
+
>>> t3 = Tree('S', [('w0', 't0'), Tree('t1', ['c1'])])
|
188 |
+
>>> ChunkString(t1)
|
189 |
+
<ChunkString: '<t0><t1><t2><t3><t4><t5><t6><t7><t8><t9>'>
|
190 |
+
>>> ChunkString(t2)
|
191 |
+
<ChunkString: '<t0><t1>'>
|
192 |
+
>>> ChunkString(t3)
|
193 |
+
<ChunkString: '<t0><t1>'>
|
194 |
+
|
195 |
+
Other values generate an error:
|
196 |
+
|
197 |
+
>>> ChunkString(Tree('S', ['x']))
|
198 |
+
Traceback (most recent call last):
|
199 |
+
. . .
|
200 |
+
ValueError: chunk structures must contain tagged tokens or trees
|
201 |
+
|
202 |
+
The `str()` for a chunk string adds spaces to it, which makes it line
|
203 |
+
up with `str()` output for other chunk strings over the same
|
204 |
+
underlying input.
|
205 |
+
|
206 |
+
>>> cs = ChunkString(t1)
|
207 |
+
>>> print(cs)
|
208 |
+
<t0> <t1> <t2> <t3> <t4> <t5> <t6> <t7> <t8> <t9>
|
209 |
+
>>> cs.xform('<t3>', '{<t3>}')
|
210 |
+
>>> print(cs)
|
211 |
+
<t0> <t1> <t2> {<t3>} <t4> <t5> <t6> <t7> <t8> <t9>
|
212 |
+
|
213 |
+
The `_verify()` method makes sure that our transforms don't corrupt
|
214 |
+
the chunk string. By setting debug_level=2, `_verify()` will be
|
215 |
+
called at the end of every call to `xform`.
|
216 |
+
|
217 |
+
>>> cs = ChunkString(t1, debug_level=3)
|
218 |
+
|
219 |
+
>>> # tag not marked with <...>:
|
220 |
+
>>> cs.xform('<t3>', 't3')
|
221 |
+
Traceback (most recent call last):
|
222 |
+
. . .
|
223 |
+
ValueError: Transformation generated invalid chunkstring:
|
224 |
+
<t0><t1><t2>t3<t4><t5><t6><t7><t8><t9>
|
225 |
+
|
226 |
+
>>> # brackets not balanced:
|
227 |
+
>>> cs.xform('<t3>', '{<t3>')
|
228 |
+
Traceback (most recent call last):
|
229 |
+
. . .
|
230 |
+
ValueError: Transformation generated invalid chunkstring:
|
231 |
+
<t0><t1><t2>{<t3><t4><t5><t6><t7><t8><t9>
|
232 |
+
|
233 |
+
>>> # nested brackets:
|
234 |
+
>>> cs.xform('<t3><t4><t5>', '{<t3>{<t4>}<t5>}')
|
235 |
+
Traceback (most recent call last):
|
236 |
+
. . .
|
237 |
+
ValueError: Transformation generated invalid chunkstring:
|
238 |
+
<t0><t1><t2>{<t3>{<t4>}<t5>}<t6><t7><t8><t9>
|
239 |
+
|
240 |
+
>>> # modified tags:
|
241 |
+
>>> cs.xform('<t3>', '<t9>')
|
242 |
+
Traceback (most recent call last):
|
243 |
+
. . .
|
244 |
+
ValueError: Transformation generated invalid chunkstring: tag changed
|
245 |
+
|
246 |
+
>>> # added tags:
|
247 |
+
>>> cs.xform('<t9>', '<t9><t10>')
|
248 |
+
Traceback (most recent call last):
|
249 |
+
. . .
|
250 |
+
ValueError: Transformation generated invalid chunkstring: tag changed
|
251 |
+
|
252 |
+
Chunking Rules
|
253 |
+
--------------
|
254 |
+
|
255 |
+
Test the different rule constructors & __repr__ methods:
|
256 |
+
|
257 |
+
>>> r1 = RegexpChunkRule('<a|b>'+ChunkString.IN_STRIP_PATTERN,
|
258 |
+
... '{<a|b>}', 'chunk <a> and <b>')
|
259 |
+
>>> r2 = RegexpChunkRule(re.compile('<a|b>'+ChunkString.IN_STRIP_PATTERN),
|
260 |
+
... '{<a|b>}', 'chunk <a> and <b>')
|
261 |
+
>>> r3 = ChunkRule('<a|b>', 'chunk <a> and <b>')
|
262 |
+
>>> r4 = StripRule('<a|b>', 'strip <a> and <b>')
|
263 |
+
>>> r5 = UnChunkRule('<a|b>', 'unchunk <a> and <b>')
|
264 |
+
>>> r6 = MergeRule('<a>', '<b>', 'merge <a> w/ <b>')
|
265 |
+
>>> r7 = SplitRule('<a>', '<b>', 'split <a> from <b>')
|
266 |
+
>>> r8 = ExpandLeftRule('<a>', '<b>', 'expand left <a> <b>')
|
267 |
+
>>> r9 = ExpandRightRule('<a>', '<b>', 'expand right <a> <b>')
|
268 |
+
>>> for rule in r1, r2, r3, r4, r5, r6, r7, r8, r9:
|
269 |
+
... print(rule)
|
270 |
+
<RegexpChunkRule: '<a|b>(?=[^\\}]*(\\{|$))'->'{<a|b>}'>
|
271 |
+
<RegexpChunkRule: '<a|b>(?=[^\\}]*(\\{|$))'->'{<a|b>}'>
|
272 |
+
<ChunkRule: '<a|b>'>
|
273 |
+
<StripRule: '<a|b>'>
|
274 |
+
<UnChunkRule: '<a|b>'>
|
275 |
+
<MergeRule: '<a>', '<b>'>
|
276 |
+
<SplitRule: '<a>', '<b>'>
|
277 |
+
<ExpandLeftRule: '<a>', '<b>'>
|
278 |
+
<ExpandRightRule: '<a>', '<b>'>
|
279 |
+
|
280 |
+
`tag_pattern2re_pattern()` complains if the tag pattern looks problematic:
|
281 |
+
|
282 |
+
>>> tag_pattern2re_pattern('{}')
|
283 |
+
Traceback (most recent call last):
|
284 |
+
. . .
|
285 |
+
ValueError: Bad tag pattern: '{}'
|
286 |
+
|
287 |
+
RegexpChunkParser
|
288 |
+
-----------------
|
289 |
+
|
290 |
+
A warning is printed when parsing an empty sentence:
|
291 |
+
|
292 |
+
>>> parser = RegexpChunkParser([ChunkRule('<a>', '')])
|
293 |
+
>>> parser.parse(Tree('S', []))
|
294 |
+
Warning: parsing empty text
|
295 |
+
Tree('S', [])
|
296 |
+
|
297 |
+
RegexpParser
|
298 |
+
------------
|
299 |
+
|
300 |
+
>>> parser = RegexpParser('''
|
301 |
+
... NP: {<DT>? <JJ>* <NN>*} # NP
|
302 |
+
... P: {<IN>} # Preposition
|
303 |
+
... V: {<V.*>} # Verb
|
304 |
+
... PP: {<P> <NP>} # PP -> P NP
|
305 |
+
... VP: {<V> <NP|PP>*} # VP -> V (NP|PP)*
|
306 |
+
... ''')
|
307 |
+
>>> print(repr(parser))
|
308 |
+
<chunk.RegexpParser with 5 stages>
|
309 |
+
>>> print(parser)
|
310 |
+
chunk.RegexpParser with 5 stages:
|
311 |
+
RegexpChunkParser with 1 rules:
|
312 |
+
NP <ChunkRule: '<DT>? <JJ>* <NN>*'>
|
313 |
+
RegexpChunkParser with 1 rules:
|
314 |
+
Preposition <ChunkRule: '<IN>'>
|
315 |
+
RegexpChunkParser with 1 rules:
|
316 |
+
Verb <ChunkRule: '<V.*>'>
|
317 |
+
RegexpChunkParser with 1 rules:
|
318 |
+
PP -> P NP <ChunkRule: '<P> <NP>'>
|
319 |
+
RegexpChunkParser with 1 rules:
|
320 |
+
VP -> V (NP|PP)* <ChunkRule: '<V> <NP|PP>*'>
|
321 |
+
>>> print(parser.parse(unchunked_text, trace=True))
|
322 |
+
# Input:
|
323 |
+
<DT> <NN> <VBD> <IN> <DT> <NN> <DT> <NN> <VBD> <.>
|
324 |
+
# NP:
|
325 |
+
{<DT> <NN>} <VBD> <IN> {<DT> <NN>}{<DT> <NN>} <VBD> <.>
|
326 |
+
# Input:
|
327 |
+
<NP> <VBD> <IN> <NP> <NP> <VBD> <.>
|
328 |
+
# Preposition:
|
329 |
+
<NP> <VBD> {<IN>} <NP> <NP> <VBD> <.>
|
330 |
+
# Input:
|
331 |
+
<NP> <VBD> <P> <NP> <NP> <VBD> <.>
|
332 |
+
# Verb:
|
333 |
+
<NP> {<VBD>} <P> <NP> <NP> {<VBD>} <.>
|
334 |
+
# Input:
|
335 |
+
<NP> <V> <P> <NP> <NP> <V> <.>
|
336 |
+
# PP -> P NP:
|
337 |
+
<NP> <V> {<P> <NP>} <NP> <V> <.>
|
338 |
+
# Input:
|
339 |
+
<NP> <V> <PP> <NP> <V> <.>
|
340 |
+
# VP -> V (NP|PP)*:
|
341 |
+
<NP> {<V> <PP> <NP>}{<V>} <.>
|
342 |
+
(S
|
343 |
+
(NP The/DT cat/NN)
|
344 |
+
(VP
|
345 |
+
(V sat/VBD)
|
346 |
+
(PP (P on/IN) (NP the/DT mat/NN))
|
347 |
+
(NP the/DT dog/NN))
|
348 |
+
(VP (V chewed/VBD))
|
349 |
+
./.)
|
350 |
+
|
351 |
+
Test parsing of other rule types:
|
352 |
+
|
353 |
+
>>> print(RegexpParser('''
|
354 |
+
... X:
|
355 |
+
... }<a><b>{ # strip rule
|
356 |
+
... <a>}{<b> # split rule
|
357 |
+
... <a>{}<b> # merge rule
|
358 |
+
... <a>{<b>}<c> # chunk rule w/ context
|
359 |
+
... '''))
|
360 |
+
chunk.RegexpParser with 1 stages:
|
361 |
+
RegexpChunkParser with 4 rules:
|
362 |
+
strip rule <StripRule: '<a><b>'>
|
363 |
+
split rule <SplitRule: '<a>', '<b>'>
|
364 |
+
merge rule <MergeRule: '<a>', '<b>'>
|
365 |
+
chunk rule w/ context <ChunkRuleWithContext: '<a>', '<b>', '<c>'>
|
366 |
+
|
367 |
+
Illegal patterns give an error message:
|
368 |
+
|
369 |
+
>>> print(RegexpParser('X: {<foo>} {<bar>}'))
|
370 |
+
Traceback (most recent call last):
|
371 |
+
. . .
|
372 |
+
ValueError: Illegal chunk pattern: {<foo>} {<bar>}
|
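As a compact recap of the rule syntax exercised above, the following is a
minimal hedged sketch (the sentence and its tags are illustrative, so the
parsing line carries a skip directive) showing a one-stage parser applied
directly to a plain list of tagged tokens:

>>> recap = RegexpParser("NP: {<DT>?<JJ>*<NN.*>+}")
>>> recap.parse([("the", "DT"), ("quick", "JJ"), ("fox", "NN"), ("ran", "VBD")])  # doctest: +SKIP
Tree('S', [Tree('NP', [('the', 'DT'), ('quick', 'JJ'), ('fox', 'NN')]), ('ran', 'VBD')])
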
venv/lib/python3.10/site-packages/nltk/test/classify.doctest
ADDED
@@ -0,0 +1,202 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

=============
 Classifiers
=============

>>> from nltk.test.classify_fixt import setup_module
>>> setup_module()

Classifiers label tokens with category labels (or *class labels*).
Typically, labels are represented with strings (such as ``"health"``
or ``"sports"``). In NLTK, classifiers are defined using classes that
implement the `ClassifierI` interface, which supports the following operations:

- self.classify(featureset)
- self.classify_many(featuresets)
- self.labels()
- self.prob_classify(featureset)
- self.prob_classify_many(featuresets)

NLTK defines several classifier classes:

- `ConditionalExponentialClassifier`
- `DecisionTreeClassifier`
- `MaxentClassifier`
- `NaiveBayesClassifier`
- `WekaClassifier`

Classifiers are typically created by training them on a training
corpus.


Regression Tests
~~~~~~~~~~~~~~~~

We define a very simple training corpus with 3 binary features: ['a',
'b', 'c'], and two labels: ['x', 'y']. We use a simple feature set so
that the correct answers can be calculated analytically (although we
haven't done this yet for all tests).

>>> import nltk
>>> train = [
...     (dict(a=1,b=1,c=1), 'y'),
...     (dict(a=1,b=1,c=1), 'x'),
...     (dict(a=1,b=1,c=0), 'y'),
...     (dict(a=0,b=1,c=1), 'x'),
...     (dict(a=0,b=1,c=1), 'y'),
...     (dict(a=0,b=0,c=1), 'y'),
...     (dict(a=0,b=1,c=0), 'x'),
...     (dict(a=0,b=0,c=0), 'x'),
...     (dict(a=0,b=1,c=1), 'y'),
...     (dict(a=None,b=1,c=0), 'x'),
...     ]
>>> test = [
...     (dict(a=1,b=0,c=1)), # unseen
...     (dict(a=1,b=0,c=0)), # unseen
...     (dict(a=0,b=1,c=1)), # seen 3 times, labels=y,y,x
...     (dict(a=0,b=1,c=0)), # seen 1 time, label=x
...     ]

Test the Naive Bayes classifier:

>>> classifier = nltk.classify.NaiveBayesClassifier.train(train)
>>> sorted(classifier.labels())
['x', 'y']
>>> classifier.classify_many(test)
['y', 'x', 'y', 'x']
>>> for pdist in classifier.prob_classify_many(test):
...     print('%.4f %.4f' % (pdist.prob('x'), pdist.prob('y')))
0.2500 0.7500
0.5833 0.4167
0.3571 0.6429
0.7000 0.3000
>>> classifier.show_most_informative_features()
Most Informative Features
                       c = 0                   x : y      =      2.3 : 1.0
                       c = 1                   y : x      =      1.8 : 1.0
                       a = 1                   y : x      =      1.7 : 1.0
                       a = 0                   x : y      =      1.0 : 1.0
                       b = 0                   x : y      =      1.0 : 1.0
                       b = 1                   x : y      =      1.0 : 1.0

Test the Decision Tree classifier (without None):

>>> classifier = nltk.classify.DecisionTreeClassifier.train(
...     train[:-1], entropy_cutoff=0,
...     support_cutoff=0)
>>> sorted(classifier.labels())
['x', 'y']
>>> print(classifier)
c=0? .................................................. x
  a=0? ................................................ x
  a=1? ................................................ y
c=1? .................................................. y
<BLANKLINE>
>>> classifier.classify_many(test)
['y', 'y', 'y', 'x']
>>> for pdist in classifier.prob_classify_many(test):
...     print('%.4f %.4f' % (pdist.prob('x'), pdist.prob('y')))
Traceback (most recent call last):
  . . .
NotImplementedError


Test the Decision Tree classifier (with None):

>>> classifier = nltk.classify.DecisionTreeClassifier.train(
...     train, entropy_cutoff=0,
...     support_cutoff=0)
>>> sorted(classifier.labels())
['x', 'y']
>>> print(classifier)
c=0? .................................................. x
  a=0? ................................................ x
  a=1? ................................................ y
  a=None? ............................................. x
c=1? .................................................. y
<BLANKLINE>


Test SklearnClassifier, which requires the scikit-learn package.

>>> from nltk.classify import SklearnClassifier
>>> from sklearn.naive_bayes import BernoulliNB
>>> from sklearn.svm import SVC
>>> train_data = [({"a": 4, "b": 1, "c": 0}, "ham"),
...               ({"a": 5, "b": 2, "c": 1}, "ham"),
...               ({"a": 0, "b": 3, "c": 4}, "spam"),
...               ({"a": 5, "b": 1, "c": 1}, "ham"),
...               ({"a": 1, "b": 4, "c": 3}, "spam")]
>>> classif = SklearnClassifier(BernoulliNB()).train(train_data)
>>> test_data = [{"a": 3, "b": 2, "c": 1},
...              {"a": 0, "b": 3, "c": 7}]
>>> classif.classify_many(test_data)
['ham', 'spam']
>>> classif = SklearnClassifier(SVC(), sparse=False).train(train_data)
>>> classif.classify_many(test_data)
['ham', 'spam']

Test the Maximum Entropy classifier training algorithms; they should all
generate the same results.

>>> def print_maxent_test_header():
...     print(' '*11+''.join(['      test[%s]  ' % i
...                           for i in range(len(test))]))
...     print(' '*11+'     p(x)  p(y)'*len(test))
...     print('-'*(11+15*len(test)))

>>> def test_maxent(algorithm):
...     print('%11s' % algorithm, end=' ')
...     try:
...         classifier = nltk.classify.MaxentClassifier.train(
...             train, algorithm, trace=0, max_iter=1000)
...     except Exception as e:
...         print('Error: %r' % e)
...         return
...
...     for featureset in test:
...         pdist = classifier.prob_classify(featureset)
...         print('%8.2f%6.2f' % (pdist.prob('x'), pdist.prob('y')), end=' ')
...     print()

>>> print_maxent_test_header(); test_maxent('GIS'); test_maxent('IIS')
                 test[0]        test[1]        test[2]        test[3]
                p(x)  p(y)     p(x)  p(y)     p(x)  p(y)     p(x)  p(y)
-----------------------------------------------------------------------
        GIS     0.16  0.84     0.46  0.54     0.41  0.59     0.76  0.24
        IIS     0.16  0.84     0.46  0.54     0.41  0.59     0.76  0.24

>>> test_maxent('MEGAM'); test_maxent('TADM') # doctest: +SKIP
      MEGAM     0.16  0.84     0.46  0.54     0.41  0.59     0.76  0.24
       TADM     0.16  0.84     0.46  0.54     0.41  0.59     0.76  0.24



Regression tests for TypedMaxentFeatureEncoding
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

>>> from nltk.classify import maxent
>>> train = [
...     ({'a': 1, 'b': 1, 'c': 1}, 'y'),
...     ({'a': 5, 'b': 5, 'c': 5}, 'x'),
...     ({'a': 0.9, 'b': 0.9, 'c': 0.9}, 'y'),
...     ({'a': 5.5, 'b': 5.4, 'c': 5.3}, 'x'),
...     ({'a': 0.8, 'b': 1.2, 'c': 1}, 'y'),
...     ({'a': 5.1, 'b': 4.9, 'c': 5.2}, 'x')
... ]

>>> test = [
...     {'a': 1, 'b': 0.8, 'c': 1.2},
...     {'a': 5.2, 'b': 5.1, 'c': 5}
... ]

>>> encoding = maxent.TypedMaxentFeatureEncoding.train(
...     train, count_cutoff=3, alwayson_features=True)

>>> classifier = maxent.MaxentClassifier.train(
...     train, bernoulli=False, encoding=encoding, trace=0)

>>> classifier.classify_many(test)
['y', 'x']

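For a self-contained starting point outside the fixture-driven setup above,
here is a minimal hedged sketch (the single feature and its values are
illustrative, so the classification line carries a skip directive):

>>> toy = nltk.classify.NaiveBayesClassifier.train(
...     [(dict(a=1), 'x'), (dict(a=0), 'y'), (dict(a=1), 'x')])
>>> toy.classify(dict(a=1))  # doctest: +SKIP
'x'
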
venv/lib/python3.10/site-packages/nltk/test/classify_fixt.py
ADDED
@@ -0,0 +1,5 @@
# most of classify.doctest requires numpy
def setup_module():
    import pytest

    pytest.importorskip("numpy")

venv/lib/python3.10/site-packages/nltk/test/collections.doctest
ADDED
@@ -0,0 +1,31 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

===========
Collections
===========

>>> import nltk
>>> from nltk.collections import *

Trie
----

Trie can be pickled:

>>> import pickle
>>> trie = nltk.collections.Trie(['a'])
>>> s = pickle.dumps(trie)
>>> pickle.loads(s)
{'a': {True: None}}

LazyIteratorList
----------------

Fetching the length of a LazyIteratorList object does not throw a StopIteration exception:

>>> lil = LazyIteratorList(i for i in range(1, 11))
>>> lil[-1]
10
>>> len(lil)
10

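Since a Trie is just a nested dictionary keyed by characters, overlapping
keys share a prefix node. A short hedged sketch (the expected layout is
extrapolated from the single-key example above, hence the skip directive):

>>> nltk.collections.Trie(['ab', 'ac'])  # doctest: +SKIP
{'a': {'b': {True: None}, 'c': {True: None}}}
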
venv/lib/python3.10/site-packages/nltk/test/collocations.doctest
ADDED
@@ -0,0 +1,307 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

==============
 Collocations
==============

Overview
~~~~~~~~

Collocations are expressions of multiple words which commonly co-occur. For
example, the top ten bigram collocations in Genesis are listed below, as
measured using Pointwise Mutual Information.

>>> import nltk
>>> from nltk.collocations import *
>>> bigram_measures = nltk.collocations.BigramAssocMeasures()
>>> trigram_measures = nltk.collocations.TrigramAssocMeasures()
>>> fourgram_measures = nltk.collocations.QuadgramAssocMeasures()
>>> finder = BigramCollocationFinder.from_words(
...     nltk.corpus.genesis.words('english-web.txt'))
>>> finder.nbest(bigram_measures.pmi, 10)
[('Allon', 'Bacuth'), ('Ashteroth', 'Karnaim'), ('Ben', 'Ammi'),
 ('En', 'Mishpat'), ('Jegar', 'Sahadutha'), ('Salt', 'Sea'),
 ('Whoever', 'sheds'), ('appoint', 'overseers'), ('aromatic', 'resin'),
 ('cutting', 'instrument')]

While these words are highly collocated, the expressions are also very
infrequent. Therefore it is useful to apply filters, such as ignoring all
bigrams which occur less than three times in the corpus:

>>> finder.apply_freq_filter(3)
>>> finder.nbest(bigram_measures.pmi, 10)
[('Beer', 'Lahai'), ('Lahai', 'Roi'), ('gray', 'hairs'),
 ('ewe', 'lambs'), ('Most', 'High'), ('many', 'colors'),
 ('burnt', 'offering'), ('Paddan', 'Aram'), ('east', 'wind'),
 ('living', 'creature')]

We may similarly find collocations among tagged words:

>>> finder = BigramCollocationFinder.from_words(
...     nltk.corpus.brown.tagged_words('ca01', tagset='universal'))
>>> finder.nbest(bigram_measures.pmi, 5)
[(('1,119', 'NUM'), ('votes', 'NOUN')),
 (('1962', 'NUM'), ("governor's", 'NOUN')),
 (('637', 'NUM'), ('E.', 'NOUN')),
 (('Alpharetta', 'NOUN'), ('prison', 'NOUN')),
 (('Bar', 'NOUN'), ('Association', 'NOUN'))]

Or tags alone:

>>> finder = BigramCollocationFinder.from_words(t for w, t in
...     nltk.corpus.brown.tagged_words('ca01', tagset='universal'))
>>> finder.nbest(bigram_measures.pmi, 10)
[('PRT', 'VERB'), ('PRON', 'VERB'), ('ADP', 'DET'), ('.', 'PRON'), ('DET', 'ADJ'),
 ('CONJ', 'PRON'), ('ADP', 'NUM'), ('NUM', '.'), ('ADV', 'ADV'), ('VERB', 'ADV')]

Or spanning intervening words:

>>> finder = BigramCollocationFinder.from_words(
...     nltk.corpus.genesis.words('english-web.txt'),
...     window_size = 20)
>>> finder.apply_freq_filter(2)
>>> ignored_words = nltk.corpus.stopwords.words('english')
>>> finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
>>> finder.nbest(bigram_measures.likelihood_ratio, 10)
[('chief', 'chief'), ('became', 'father'), ('years', 'became'),
 ('hundred', 'years'), ('lived', 'became'), ('king', 'king'),
 ('lived', 'years'), ('became', 'became'), ('chief', 'chiefs'),
 ('hundred', 'became')]

Finders
~~~~~~~

The collocations package provides collocation finders which by default
consider all ngrams in a text as candidate collocations:

>>> text = "I do not like green eggs and ham, I do not like them Sam I am!"
>>> tokens = nltk.wordpunct_tokenize(text)
>>> finder = BigramCollocationFinder.from_words(tokens)
>>> scored = finder.score_ngrams(bigram_measures.raw_freq)
>>> sorted(bigram for bigram, score in scored)
[(',', 'I'), ('I', 'am'), ('I', 'do'), ('Sam', 'I'), ('am', '!'),
 ('and', 'ham'), ('do', 'not'), ('eggs', 'and'), ('green', 'eggs'),
 ('ham', ','), ('like', 'green'), ('like', 'them'), ('not', 'like'),
 ('them', 'Sam')]

We could otherwise construct the collocation finder from manually-derived
FreqDists:

>>> word_fd = nltk.FreqDist(tokens)
>>> bigram_fd = nltk.FreqDist(nltk.bigrams(tokens))
>>> finder = BigramCollocationFinder(word_fd, bigram_fd)
>>> scored == finder.score_ngrams(bigram_measures.raw_freq)
True

A similar interface is provided for trigrams:

>>> finder = TrigramCollocationFinder.from_words(tokens)
>>> scored = finder.score_ngrams(trigram_measures.raw_freq)
>>> set(trigram for trigram, score in scored) == set(nltk.trigrams(tokens))
True

We may want to select only the top n results:

>>> sorted(finder.nbest(trigram_measures.raw_freq, 2))
[('I', 'do', 'not'), ('do', 'not', 'like')]

Alternatively, we can select those above a minimum score value:

>>> sorted(finder.above_score(trigram_measures.raw_freq,
...                           1.0 / len(tuple(nltk.trigrams(tokens)))))
[('I', 'do', 'not'), ('do', 'not', 'like')]

Now spanning intervening words:

>>> finder = TrigramCollocationFinder.from_words(tokens)
>>> finder = TrigramCollocationFinder.from_words(tokens, window_size=4)
>>> sorted(finder.nbest(trigram_measures.raw_freq, 4))
[('I', 'do', 'like'), ('I', 'do', 'not'), ('I', 'not', 'like'), ('do', 'not', 'like')]

A closer look at the finder's ngram frequencies:

>>> sorted(finder.ngram_fd.items(), key=lambda t: (-t[1], t[0]))[:10]
[(('I', 'do', 'like'), 2), (('I', 'do', 'not'), 2), (('I', 'not', 'like'), 2),
 (('do', 'not', 'like'), 2), ((',', 'I', 'do'), 1), ((',', 'I', 'not'), 1),
 ((',', 'do', 'not'), 1), (('I', 'am', '!'), 1), (('Sam', 'I', '!'), 1),
 (('Sam', 'I', 'am'), 1)]

A similar interface is provided for fourgrams:

>>> finder_4grams = QuadgramCollocationFinder.from_words(tokens)
>>> scored_4grams = finder_4grams.score_ngrams(fourgram_measures.raw_freq)
>>> set(fourgram for fourgram, score in scored_4grams) == set(nltk.ngrams(tokens, n=4))
True

Filtering candidates
~~~~~~~~~~~~~~~~~~~~

All the ngrams in a text are often too many to be useful when finding
collocations. It is generally useful to remove some words or punctuation,
and to require a minimum frequency for candidate collocations.

Given our sample text above, if we remove all trigrams containing personal
pronouns from candidature, score_ngrams should return 6 fewer results, and
'do not like' will be the only candidate which occurs more than once:

>>> finder = TrigramCollocationFinder.from_words(tokens)
>>> len(finder.score_ngrams(trigram_measures.raw_freq))
14
>>> finder.apply_word_filter(lambda w: w in ('I', 'me'))
>>> len(finder.score_ngrams(trigram_measures.raw_freq))
8
>>> sorted(finder.above_score(trigram_measures.raw_freq,
...                           1.0 / len(tuple(nltk.trigrams(tokens)))))
[('do', 'not', 'like')]

Sometimes a filter is a function on the whole ngram, rather than each word,
such as if we may permit 'and' to appear in the middle of a trigram, but
not on either edge:

>>> finder.apply_ngram_filter(lambda w1, w2, w3: 'and' in (w1, w3))
>>> len(finder.score_ngrams(trigram_measures.raw_freq))
6

Finally, it is often important to remove low frequency candidates, as we
lack sufficient evidence about their significance as collocations:

>>> finder.apply_freq_filter(2)
>>> len(finder.score_ngrams(trigram_measures.raw_freq))
1

Association measures
~~~~~~~~~~~~~~~~~~~~

A number of measures are available to score collocations or other associations.
The arguments to measure functions are marginals of a contingency table, in the
bigram case (n_ii, (n_ix, n_xi), n_xx)::

                w1    ~w1
             ------ ------
         w2 | n_ii | n_oi | = n_xi
             ------ ------
        ~w2 | n_io | n_oo |
             ------ ------
             = n_ix        TOTAL = n_xx

We test their calculation using some known values presented in Manning and
Schutze's text and other papers.

Student's t: examples from Manning and Schutze 5.3.2

>>> print('%0.4f' % bigram_measures.student_t(8, (15828, 4675), 14307668))
0.9999
>>> print('%0.4f' % bigram_measures.student_t(20, (42, 20), 14307668))
4.4721

Chi-square: examples from Manning and Schutze 5.3.3

>>> print('%0.2f' % bigram_measures.chi_sq(8, (15828, 4675), 14307668))
1.55
>>> print('%0.0f' % bigram_measures.chi_sq(59, (67, 65), 571007))
456400

Likelihood ratios: examples from Dunning, CL, 1993

>>> print('%0.2f' % bigram_measures.likelihood_ratio(110, (2552, 221), 31777))
270.72
>>> print('%0.2f' % bigram_measures.likelihood_ratio(8, (13, 32), 31777))
95.29

Pointwise Mutual Information: examples from Manning and Schutze 5.4

>>> print('%0.2f' % bigram_measures.pmi(20, (42, 20), 14307668))
18.38
>>> print('%0.2f' % bigram_measures.pmi(20, (15019, 15629), 14307668))
0.29

TODO: Find authoritative results for trigrams.

Using contingency table values
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

While frequency counts make marginals readily available for collocation
finding, it is common to find published contingency table values. The
collocations package therefore provides a wrapper, ContingencyMeasures, which
wraps an association measures class, providing association measures which
take contingency values as arguments, (n_ii, n_io, n_oi, n_oo) in the
bigram case.

>>> from nltk.metrics import ContingencyMeasures
>>> cont_bigram_measures = ContingencyMeasures(bigram_measures)
>>> print('%0.2f' % cont_bigram_measures.likelihood_ratio(8, 5, 24, 31740))
95.29
>>> print('%0.2f' % cont_bigram_measures.chi_sq(8, 15820, 4667, 14287173))
1.55

Ranking and correlation
~~~~~~~~~~~~~~~~~~~~~~~

It is useful to consider the results of finding collocations as a ranking, and
the rankings output using different association measures can be compared using
the Spearman correlation coefficient.

Ranks can be assigned to a sorted list of results trivially by assigning
strictly increasing ranks to each result:

>>> from nltk.metrics.spearman import *
>>> results_list = ['item1', 'item2', 'item3', 'item4', 'item5']
>>> print(list(ranks_from_sequence(results_list)))
[('item1', 0), ('item2', 1), ('item3', 2), ('item4', 3), ('item5', 4)]

If scores are available for each result, we may allow sufficiently similar
results (differing by no more than rank_gap) to be assigned the same rank:

>>> results_scored = [('item1', 50.0), ('item2', 40.0), ('item3', 38.0),
...                   ('item4', 35.0), ('item5', 14.0)]
>>> print(list(ranks_from_scores(results_scored, rank_gap=5)))
[('item1', 0), ('item2', 1), ('item3', 1), ('item4', 1), ('item5', 4)]

The Spearman correlation coefficient gives a number from -1.0 to 1.0 comparing
two rankings. A coefficient of 1.0 indicates identical rankings; -1.0 indicates
exact opposite rankings.

>>> print('%0.1f' % spearman_correlation(
...     ranks_from_sequence(results_list),
...     ranks_from_sequence(results_list)))
1.0
>>> print('%0.1f' % spearman_correlation(
...     ranks_from_sequence(reversed(results_list)),
...     ranks_from_sequence(results_list)))
-1.0
>>> results_list2 = ['item2', 'item3', 'item1', 'item5', 'item4']
>>> print('%0.1f' % spearman_correlation(
...     ranks_from_sequence(results_list),
...     ranks_from_sequence(results_list2)))
0.6
>>> print('%0.1f' % spearman_correlation(
...     ranks_from_sequence(reversed(results_list)),
...     ranks_from_sequence(results_list2)))
-0.6

Keywords
~~~~~~~~

Bigram association metrics can also be used to perform keyword analysis. For
example, this finds the keywords associated with the "romance" section of the
Brown corpus as measured by likelihood ratio:

>>> romance = nltk.FreqDist(w.lower() for w in nltk.corpus.brown.words(categories='romance') if w.isalpha())
>>> freq = nltk.FreqDist(w.lower() for w in nltk.corpus.brown.words() if w.isalpha())

>>> key = nltk.FreqDist()
>>> for w in romance:
...     key[w] = bigram_measures.likelihood_ratio(romance[w], (freq[w], romance.N()), freq.N())

>>> for k,v in key.most_common(10):
...     print(f'{k:10s} {v:9.3f}')
she         1163.325
i            995.961
her          930.528
you          513.149
of           501.891
is           463.386
had          421.615
he           411.000
the          347.632
said         300.811

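The PMI figures above can be checked by hand: for bigrams, NLTK's pmi is
log2(n_ii * n_xx) - log2(n_ix * n_xi). A brief hedged sketch reproducing
the first Manning and Schutze value (pure arithmetic, marked skipped only
because it restates library behaviour rather than testing it):

>>> from math import log2
>>> round(log2(20 * 14307668) - log2(42 * 20), 2)  # doctest: +SKIP
18.38
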
venv/lib/python3.10/site-packages/nltk/test/concordance.doctest
ADDED
@@ -0,0 +1,75 @@
.. Copyright (C) 2001-2016 NLTK Project
.. For license information, see LICENSE.TXT

==================================
Concordance Example
==================================

A concordance view shows us every occurrence of a given
word, together with some context. Here we look up the word monstrous
in Moby Dick by entering text1 followed by a period, then the term
concordance, and then placing "monstrous" in parentheses:

>>> from nltk.corpus import gutenberg
>>> from nltk.text import Text
>>> corpus = gutenberg.words('melville-moby_dick.txt')
>>> text = Text(corpus)

>>> text.concordance("monstrous")
Displaying 11 of 11 matches:
ong the former , one was of a most monstrous size . ... This came towards us ,
ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r
ll over with a heathenish array of monstrous clubs and spears . Some were thick
d as you gazed , and wondered what monstrous cannibal and savage could ever hav
that has survived the flood ; most monstrous and most mountainous ! That Himmal
they might scout at Moby Dick as a monstrous fable , or still worse and more de
th of Radney .'" CHAPTER 55 Of the Monstrous Pictures of Whales . I shall ere l
ing Scenes . In connexion with the monstrous pictures of whales , I am strongly
ere to enter upon those still more monstrous stories of them which are to be fo
ght have been rummaged out of this monstrous cabinet there is no telling . But
of Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead u

>>> text.concordance("monstrous")
Displaying 11 of 11 matches:
ong the former , one was of a most monstrous size . ... This came towards us ,
ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r
ll over with a heathenish array of monstrous clubs and spears . Some were thick
...

We can also search for a multi-word phrase by passing a list of strings:

>>> text.concordance(["monstrous", "size"])
Displaying 2 of 2 matches:
the former , one was of a most monstrous size . ... This came towards us , op
Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead upo

=================================
Concordance List
=================================

Often we need to store the results of concordance for further usage.
To do so, call the concordance function with the stdout argument set
to false:

>>> from nltk.corpus import gutenberg
>>> from nltk.text import Text
>>> corpus = gutenberg.words('melville-moby_dick.txt')
>>> text = Text(corpus)
>>> con_list = text.concordance_list("monstrous")
>>> con_list[2].line
'll over with a heathenish array of monstrous clubs and spears . Some were thick'
>>> len(con_list)
11

=================================
Patching Issue #2088
=================================

Patching https://github.com/nltk/nltk/issues/2088
The left slice of the left context should be clipped to 0 if `i-context` < 0.

>>> from nltk import Text, word_tokenize
>>> jane_eyre = 'Chapter 1\nTHERE was no possibility of taking a walk that day. We had been wandering, indeed, in the leafless shrubbery an hour in the morning; but since dinner (Mrs. Reed, when there was no company, dined early) the cold winter wind had brought with it clouds so sombre, and a rain so penetrating, that further outdoor exercise was now out of the question.'
>>> text = Text(word_tokenize(jane_eyre))
>>> text.concordance_list('taking')[0].left
['Chapter', '1', 'THERE', 'was', 'no', 'possibility', 'of']

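Text.concordance also accepts width and lines keyword arguments for a
narrower or shorter display. A hedged sketch (it assumes the Moby Dick
`text` built in the first section, and the exact wrapped output is not
reproduced here, hence the skip directive):

>>> text.concordance("monstrous", width=40, lines=2)  # doctest: +SKIP
Displaying 2 of 11 matches:
...
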
venv/lib/python3.10/site-packages/nltk/test/conftest.py
ADDED
@@ -0,0 +1,33 @@
import pytest

from nltk.corpus.reader import CorpusReader


@pytest.fixture(autouse=True)
def mock_plot(mocker):
    """Disable matplotlib plotting in test code"""

    try:
        import matplotlib.pyplot as plt

        mocker.patch.object(plt, "gca")
        mocker.patch.object(plt, "show")
    except ImportError:
        pass


@pytest.fixture(scope="module", autouse=True)
def teardown_loaded_corpora():
    """
    After each test session ends (either doctest or unit test),
    unload any loaded corpora
    """

    yield  # first, wait for the test to end

    import nltk.corpus

    for name in dir(nltk.corpus):
        obj = getattr(nltk.corpus, name, None)
        if isinstance(obj, CorpusReader) and hasattr(obj, "_unload"):
            obj._unload()

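Because both fixtures above are autouse, test modules in this directory need
no explicit opt-in. A hypothetical sketch of a test that relies on mock_plot
(the test name and body are illustrative, not part of the suite):

def test_plotting_is_silenced():
    import matplotlib.pyplot as plt

    plt.show()  # patched out by the autouse mock_plot fixture; no window opens
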
venv/lib/python3.10/site-packages/nltk/test/corpus.doctest
ADDED
The diff for this file is too large to render.
See raw diff
venv/lib/python3.10/site-packages/nltk/test/crubadan.doctest
ADDED
@@ -0,0 +1,65 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

Crubadan Corpus Reader
======================

Crubadan is an NLTK corpus reader for ngram files provided
by the Crubadan project. It supports several languages.

>>> from nltk.corpus import crubadan
>>> crubadan.langs()
['abk', 'abn',..., 'zpa', 'zul']

----------------------------------------
Language code mapping and helper methods
----------------------------------------

The web crawler that generates the 3-gram frequencies works at the
level of "writing systems" rather than languages. Writing systems
are assigned internal 2-3 letter codes that require mapping to the
standard ISO 639-3 codes. For more information, please refer to
the README in the nltk_data/crubadan folder after installing it.

To translate ISO 639-3 codes to "Crubadan Code":

>>> crubadan.iso_to_crubadan('eng')
'en'
>>> crubadan.iso_to_crubadan('fra')
'fr'
>>> crubadan.iso_to_crubadan('aaa')

In reverse, print the ISO 639-3 code if we have the Crubadan Code:

>>> crubadan.crubadan_to_iso('en')
'eng'
>>> crubadan.crubadan_to_iso('fr')
'fra'
>>> crubadan.crubadan_to_iso('aa')

---------------------------
Accessing ngram frequencies
---------------------------

On initialization the reader will create a dictionary of every
language supported by the Crubadan project, mapping the ISO 639-3
language code to its corresponding ngram frequency.

You can access an individual language's FreqDist and the ngrams within it
as follows:

>>> english_fd = crubadan.lang_freq('eng')
>>> english_fd['the']
728135

The above accesses the FreqDist of English and returns the frequency of
the ngram 'the'. An ngram that isn't found within the language will return 0:

>>> english_fd['sometest']
0

A language that isn't supported will raise an exception:

>>> crubadan.lang_freq('elvish')
Traceback (most recent call last):
...
RuntimeError: Unsupported language.

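These ngram profiles are what NLTK's TextCat-style language guesser is
built on. A hedged sketch (it assumes the crubadan data is installed, and
the guessed code is plausible rather than verified, hence the skip
directive):

>>> from nltk.classify.textcat import TextCat
>>> TextCat().guess_language("Bonjour tout le monde")  # doctest: +SKIP
'fra'
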
venv/lib/python3.10/site-packages/nltk/test/data.doctest
ADDED
@@ -0,0 +1,387 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
=========================================
|
5 |
+
Loading Resources From the Data Package
|
6 |
+
=========================================
|
7 |
+
|
8 |
+
>>> import nltk.data
|
9 |
+
|
10 |
+
Overview
|
11 |
+
~~~~~~~~
|
12 |
+
The `nltk.data` module contains functions that can be used to load
|
13 |
+
NLTK resource files, such as corpora, grammars, and saved processing
|
14 |
+
objects.
|
15 |
+
|
16 |
+
Loading Data Files
|
17 |
+
~~~~~~~~~~~~~~~~~~
|
18 |
+
Resources are loaded using the function `nltk.data.load()`, which
|
19 |
+
takes as its first argument a URL specifying what file should be
|
20 |
+
loaded. The ``nltk:`` protocol loads files from the NLTK data
|
21 |
+
distribution:
|
22 |
+
|
23 |
+
>>> tokenizer = nltk.data.load('nltk:tokenizers/punkt/english.pickle')
|
24 |
+
>>> tokenizer.tokenize('Hello. This is a test. It works!')
|
25 |
+
['Hello.', 'This is a test.', 'It works!']
|
26 |
+
|
27 |
+
It is important to note that there should be no space following the
|
28 |
+
colon (':') in the URL; 'nltk: tokenizers/punkt/english.pickle' will
|
29 |
+
not work!
|
30 |
+
|
31 |
+
The ``nltk:`` protocol is used by default if no protocol is specified:
|
32 |
+
|
33 |
+
>>> nltk.data.load('tokenizers/punkt/english.pickle')
|
34 |
+
<nltk.tokenize.punkt.PunktSentenceTokenizer object at ...>
|
35 |
+
|
36 |
+
But it is also possible to load resources from ``http:``, ``ftp:``,
|
37 |
+
and ``file:`` URLs:
|
38 |
+
|
39 |
+
>>> # Load a grammar from the NLTK webpage.
|
40 |
+
>>> cfg = nltk.data.load('https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg')
|
41 |
+
>>> print(cfg) # doctest: +ELLIPSIS
|
42 |
+
Grammar with 14 productions (start state = S)
|
43 |
+
S -> NP VP
|
44 |
+
PP -> P NP
|
45 |
+
...
|
46 |
+
P -> 'on'
|
47 |
+
P -> 'in'
|
48 |
+
|
49 |
+
>>> # Load a grammar using an absolute path.
|
50 |
+
>>> url = 'file:%s' % nltk.data.find('grammars/sample_grammars/toy.cfg')
|
51 |
+
>>> url.replace('\\', '/')
|
52 |
+
'file:...toy.cfg'
|
53 |
+
>>> print(nltk.data.load(url))
|
54 |
+
Grammar with 14 productions (start state = S)
|
55 |
+
S -> NP VP
|
56 |
+
PP -> P NP
|
57 |
+
...
|
58 |
+
P -> 'on'
|
59 |
+
P -> 'in'
|
60 |
+
|
61 |
+
The second argument to the `nltk.data.load()` function specifies the
|
62 |
+
file format, which determines how the file's contents are processed
|
63 |
+
before they are returned by ``load()``. The formats that are
|
64 |
+
currently supported by the data module are described by the dictionary
|
65 |
+
`nltk.data.FORMATS`:
|
66 |
+
|
67 |
+
>>> for format, descr in sorted(nltk.data.FORMATS.items()):
|
68 |
+
... print('{0:<7} {1:}'.format(format, descr))
|
69 |
+
cfg A context free grammar.
|
70 |
+
fcfg A feature CFG.
|
71 |
+
fol A list of first order logic expressions, parsed with
|
72 |
+
nltk.sem.logic.Expression.fromstring.
|
73 |
+
    json       A serialized python object, stored using the json module.
    logic      A list of first order logic expressions, parsed with
               nltk.sem.logic.LogicParser.  Requires an additional
               logic_parser parameter.
    pcfg       A probabilistic CFG.
    pickle     A serialized python object, stored using the pickle
               module.
    raw        The raw (byte string) contents of a file.
    text       The raw (unicode string) contents of a file.
    val        A semantic valuation, parsed by
               nltk.sem.Valuation.fromstring.
    yaml       A serialized python object, stored using the yaml module.

`nltk.data.load()` will raise a ValueError if a bad format name is
specified:

    >>> nltk.data.load('grammars/sample_grammars/toy.cfg', 'bar')
    Traceback (most recent call last):
      . . .
    ValueError: Unknown format type!

By default, the ``"auto"`` format is used, which chooses a format
based on the filename's extension.  The mapping from file extensions
to format names is specified by `nltk.data.AUTO_FORMATS`:

    >>> for ext, format in sorted(nltk.data.AUTO_FORMATS.items()):
    ...     print('.%-7s -> %s' % (ext, format))
    .cfg     -> cfg
    .fcfg    -> fcfg
    .fol     -> fol
    .json    -> json
    .logic   -> logic
    .pcfg    -> pcfg
    .pickle  -> pickle
    .text    -> text
    .txt     -> text
    .val     -> val
    .yaml    -> yaml

If `nltk.data.load()` is unable to determine the format based on the
filename's extension, it will raise a ValueError:

    >>> nltk.data.load('foo.bar')
    Traceback (most recent call last):
      . . .
    ValueError: Could not determine format for foo.bar based on its file
    extension; use the "format" argument to specify the format explicitly.

Note that by explicitly specifying the ``format`` argument, you can
override the load method's default processing behavior.  For example,
to get the unprocessed contents of any file, use ``format="raw"`` for
bytes or ``format="text"`` for a string:

    >>> s = nltk.data.load('grammars/sample_grammars/toy.cfg', 'text')
    >>> print(s)
    S -> NP VP
    PP -> P NP
    NP -> Det N | NP PP
    VP -> V NP | VP PP
    ...

Making Local Copies
~~~~~~~~~~~~~~~~~~~
.. This will not be visible in the html output: create a tempdir to
   play in.

    >>> import tempfile, os
    >>> tempdir = tempfile.mkdtemp()
    >>> old_dir = os.path.abspath('.')
    >>> os.chdir(tempdir)

The function `nltk.data.retrieve()` copies a given resource to a local
file.  This can be useful, for example, if you want to edit one of the
sample grammars.

    >>> nltk.data.retrieve('grammars/sample_grammars/toy.cfg')
    Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'toy.cfg'

    >>> # Simulate editing the grammar.
    >>> with open('toy.cfg') as inp:
    ...     s = inp.read().replace('NP', 'DP')
    >>> with open('toy.cfg', 'w') as out:
    ...     _bytes_written = out.write(s)

    >>> # Load the edited grammar, & display it.
    >>> cfg = nltk.data.load('file:///' + os.path.abspath('toy.cfg'))
    >>> print(cfg)
    Grammar with 14 productions (start state = S)
        S -> DP VP
        PP -> P DP
        ...
        P -> 'on'
        P -> 'in'

The second argument to `nltk.data.retrieve()` specifies the filename
for the new copy of the file.  By default, the source file's filename
is used.

    >>> nltk.data.retrieve('grammars/sample_grammars/toy.cfg', 'mytoy.cfg')
    Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'mytoy.cfg'
    >>> os.path.isfile('./mytoy.cfg')
    True
    >>> nltk.data.retrieve('grammars/sample_grammars/np.fcfg')
    Retrieving 'nltk:grammars/sample_grammars/np.fcfg', saving to 'np.fcfg'
    >>> os.path.isfile('./np.fcfg')
    True

If a file with the specified (or default) filename already exists in
the current directory, then `nltk.data.retrieve()` will raise a
ValueError exception.  It will *not* overwrite the file:

    >>> os.path.isfile('./toy.cfg')
    True
    >>> nltk.data.retrieve('grammars/sample_grammars/toy.cfg')
    Traceback (most recent call last):
      . . .
    ValueError: File '...toy.cfg' already exists!

.. This will not be visible in the html output: clean up the tempdir.

    >>> os.chdir(old_dir)
    >>> for f in os.listdir(tempdir):
    ...     os.remove(os.path.join(tempdir, f))
    >>> os.rmdir(tempdir)

Finding Files in the NLTK Data Package
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The `nltk.data.find()` function searches the NLTK data package for a
given file, and returns a pointer to that file.  This pointer can
either be a `FileSystemPathPointer` (whose `path` attribute gives the
absolute path of the file); or a `ZipFilePathPointer`, specifying a
zipfile and the name of an entry within that zipfile.  Both pointer
types define the `open()` method, which can be used to read the string
contents of the file.

    >>> path = nltk.data.find('corpora/abc/rural.txt')
    >>> str(path)
    '...rural.txt'
    >>> print(path.open().read(60).decode())
    PM denies knowledge of AWB kickbacks
    The Prime Minister has
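
Which pointer type you get depends on whether the resource is installed
as a plain file or inside a zipfile; a quick way to check is an
``isinstance`` test (a minimal sketch, assuming both pointer classes are
importable from `nltk.data` as named above):

    >>> from nltk.data import FileSystemPathPointer, ZipFilePathPointer
    >>> isinstance(path, (FileSystemPathPointer, ZipFilePathPointer))
    True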

Alternatively, the `nltk.data.load()` function can be used with the
keyword argument ``format="raw"``:

    >>> s = nltk.data.load('corpora/abc/rural.txt', format='raw')[:60]
    >>> print(s.decode())
    PM denies knowledge of AWB kickbacks
    The Prime Minister has

Similarly, you can use the keyword argument ``format="text"``:

    >>> s = nltk.data.load('corpora/abc/rural.txt', format='text')[:60]
    >>> print(s)
    PM denies knowledge of AWB kickbacks
    The Prime Minister has

Resource Caching
~~~~~~~~~~~~~~~~

NLTK maintains a cache of resources that have been loaded.  If you
load a resource that is already stored in the cache, then the cached
copy will be returned.  This behavior can be seen by the trace output
generated when ``verbose=True``:

    >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg', verbose=True)
    <<Loading nltk:grammars/book_grammars/feat0.fcfg>>
    >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg', verbose=True)
    <<Using cached copy of nltk:grammars/book_grammars/feat0.fcfg>>

If you wish to load a resource from its source, bypassing the cache,
use the ``cache=False`` argument to `nltk.data.load()`.  This can be
useful, for example, if the resource is loaded from a local file, and
you are actively editing that file:

    >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg', cache=False, verbose=True)
    <<Loading nltk:grammars/book_grammars/feat0.fcfg>>

The cache does *not* use weak references.  A resource will not be
automatically expunged from the cache when no more objects are using
it.  In the following example, when we clear the variable ``feat0``,
the reference count for the feature grammar object drops to zero.
However, the object remains cached:

    >>> del feat0
    >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg',
    ...                        verbose=True)
    <<Using cached copy of nltk:grammars/book_grammars/feat0.fcfg>>

You can clear the entire contents of the cache, using
`nltk.data.clear_cache()`:

    >>> nltk.data.clear_cache()
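
After the cache has been cleared, a subsequent load goes back to the
source again (a quick check of the pattern, re-using the resource from
above):

    >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg', verbose=True)
    <<Loading nltk:grammars/book_grammars/feat0.fcfg>>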

Retrieving other Data Sources
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    >>> formulas = nltk.data.load('grammars/book_grammars/background.fol')
    >>> for f in formulas: print(str(f))
    all x.(boxerdog(x) -> dog(x))
    all x.(boxer(x) -> person(x))
    all x.-(dog(x) & person(x))
    all x.(married(x) <-> exists y.marry(x,y))
    all x.(bark(x) -> dog(x))
    all x y.(marry(x,y) -> (person(x) & person(y)))
    -(Vincent = Mia)
    -(Vincent = Fido)
    -(Mia = Fido)

Regression Tests
~~~~~~~~~~~~~~~~
Create a temp dir for tests that write files:

    >>> import tempfile, os
    >>> tempdir = tempfile.mkdtemp()
    >>> old_dir = os.path.abspath('.')
    >>> os.chdir(tempdir)

The `retrieve()` function accepts all URL types:

    >>> urls = ['https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg',
    ...         'file:%s' % nltk.data.find('grammars/sample_grammars/toy.cfg'),
    ...         'nltk:grammars/sample_grammars/toy.cfg',
    ...         'grammars/sample_grammars/toy.cfg']
    >>> for i, url in enumerate(urls):
    ...     nltk.data.retrieve(url, 'toy-%d.cfg' % i)
    Retrieving 'https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg', saving to 'toy-0.cfg'
    Retrieving 'file:...toy.cfg', saving to 'toy-1.cfg'
    Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'toy-2.cfg'
    Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'toy-3.cfg'

Clean up the temp dir:

    >>> os.chdir(old_dir)
    >>> for f in os.listdir(tempdir):
    ...     os.remove(os.path.join(tempdir, f))
    >>> os.rmdir(tempdir)

Lazy Loader
-----------
A lazy loader is a wrapper object that defers loading a resource until
it is accessed or used in any way.  This is mainly intended for
internal use by NLTK's corpus readers.

    >>> # Create a lazy loader for toy.cfg.
    >>> ll = nltk.data.LazyLoader('grammars/sample_grammars/toy.cfg')

    >>> # Show that it's not loaded yet:
    >>> object.__repr__(ll)
    '<nltk.data.LazyLoader object at ...>'

    >>> # printing it is enough to cause it to be loaded:
    >>> print(ll)
    <Grammar with 14 productions>

    >>> # Show that it's now been loaded:
    >>> object.__repr__(ll)
    '<nltk.grammar.CFG object at ...>'

    >>> # Test that accessing an attribute also loads it:
    >>> ll = nltk.data.LazyLoader('grammars/sample_grammars/toy.cfg')
    >>> ll.start()
    S
    >>> object.__repr__(ll)
    '<nltk.grammar.CFG object at ...>'
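
The trick behind this behavior is that, on first use, the wrapper loads
the resource and then *becomes* it, by overwriting its own ``__class__``
and ``__dict__`` (which is why ``object.__repr__`` reports
``nltk.grammar.CFG`` afterwards).  A minimal sketch of the idea, not
NLTK's actual code (the ``load`` factory is a hypothetical
zero-argument callable)::

    class LazyProxy:
        """Defer building an object until it is first used."""

        def __init__(self, load):
            self.__dict__['_load'] = load   # zero-argument factory (assumed)

        def _materialize(self):
            obj = self._load()
            self.__class__ = obj.__class__  # become the loaded object
            self.__dict__ = obj.__dict__

        def __getattr__(self, name):        # only called for missing attributes
            self._materialize()
            return getattr(self, name)

NLTK's ``LazyLoader`` additionally hooks ``__repr__``, which is why
merely printing the object is enough to trigger loading.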

Buffered Gzip Reading and Writing
---------------------------------
Write performance to gzip-compressed files is extremely poor when the
files become large.  File creation can become a bottleneck in those
cases.

Read performance from large gzipped pickle files was improved in data.py by
buffering the reads.  A similar fix can be applied to writes by buffering
the writes to an in-memory ``StringIO``/``BytesIO`` buffer first.

This is mainly intended for internal use.  The test simply checks that
reading and writing work as intended; it does not measure how much
improvement the buffering provides.

    >>> from io import StringIO
    >>> test = nltk.data.BufferedGzipFile('testbuf.gz', 'wb', size=2**10)
    >>> ans = []
    >>> for i in range(10000):
    ...     ans.append(str(i).encode('ascii'))
    ...     test.write(str(i).encode('ascii'))
    >>> test.close()
    >>> test = nltk.data.BufferedGzipFile('testbuf.gz', 'rb')
    >>> test.read() == b''.join(ans)
    True
    >>> test.close()
    >>> import os
    >>> os.unlink('testbuf.gz')
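
The underlying idea is simple enough to sketch: collect writes in
memory and only push them through the compressor in large chunks, when
the buffer fills or the file is closed.  A rough illustration
(hypothetical class, not the actual ``BufferedGzipFile``
implementation)::

    import gzip
    from io import BytesIO

    class BufferedGzipWriter:
        """Buffer writes in memory; flush through gzip in large chunks."""

        def __init__(self, filename, size=2 ** 20):
            self._file = gzip.open(filename, 'wb')
            self._buffer = BytesIO()
            self._size = size

        def write(self, data):
            self._buffer.write(data)
            if self._buffer.tell() >= self._size:
                self.flush()

        def flush(self):
            self._file.write(self._buffer.getvalue())
            self._buffer = BytesIO()

        def close(self):
            self.flush()
            self._file.close()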

JSON Encoding and Decoding
--------------------------
JSON serialization is used instead of pickle for some classes.

    >>> from nltk import jsontags
    >>> from nltk.jsontags import JSONTaggedEncoder, JSONTaggedDecoder, register_tag
    >>> @jsontags.register_tag
    ... class JSONSerializable:
    ...     json_tag = 'JSONSerializable'
    ...
    ...     def __init__(self, n):
    ...         self.n = n
    ...
    ...     def encode_json_obj(self):
    ...         return self.n
    ...
    ...     @classmethod
    ...     def decode_json_obj(cls, obj):
    ...         n = obj
    ...         return cls(n)
    ...
    >>> JSONTaggedEncoder().encode(JSONSerializable(1))
    '{"!JSONSerializable": 1}'
    >>> JSONTaggedDecoder().decode('{"!JSONSerializable": 1}').n
    1
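
The ``json_tag`` (prefixed with ``!`` in the wire format) is what lets
the decoder dispatch back to the right class's ``decode_json_obj()``.
Tagged objects also nest inside ordinary JSON containers; continuing
with the class defined above, a list of tagged objects should
round-trip as expected:

    >>> JSONTaggedDecoder().decode('[{"!JSONSerializable": 1}, {"!JSONSerializable": 2}]')[1].n
    2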
venv/lib/python3.10/site-packages/nltk/test/dependency.doctest
ADDED
@@ -0,0 +1,241 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

===================
Dependency Grammars
===================

    >>> from nltk.grammar import DependencyGrammar
    >>> from nltk.parse import (
    ...     DependencyGraph,
    ...     ProjectiveDependencyParser,
    ...     NonprojectiveDependencyParser,
    ... )

CoNLL Data
----------

    >>> treebank_data = """Pierre  NNP     2       NMOD
    ... Vinken  NNP     8       SUB
    ... ,       ,       2       P
    ... 61      CD      5       NMOD
    ... years   NNS     6       AMOD
    ... old     JJ      2       NMOD
    ... ,       ,       2       P
    ... will    MD      0       ROOT
    ... join    VB      8       VC
    ... the     DT      11      NMOD
    ... board   NN      9       OBJ
    ... as      IN      9       VMOD
    ... a       DT      15      NMOD
    ... nonexecutive    JJ      15      NMOD
    ... director        NN      12      PMOD
    ... Nov.    NNP     9       VMOD
    ... 29      CD      16      NMOD
    ... .       .       9       VMOD
    ... """

    >>> dg = DependencyGraph(treebank_data)
    >>> dg.tree().pprint()
    (will
      (Vinken Pierre , (old (years 61)) ,)
      (join (board the) (as (director a nonexecutive)) (Nov. 29) .))
    >>> for head, rel, dep in dg.triples():
    ...     print(
    ...         '({h[0]}, {h[1]}), {r}, ({d[0]}, {d[1]})'
    ...         .format(h=head, r=rel, d=dep)
    ...     )
    (will, MD), SUB, (Vinken, NNP)
    (Vinken, NNP), NMOD, (Pierre, NNP)
    (Vinken, NNP), P, (,, ,)
    (Vinken, NNP), NMOD, (old, JJ)
    (old, JJ), AMOD, (years, NNS)
    (years, NNS), NMOD, (61, CD)
    (Vinken, NNP), P, (,, ,)
    (will, MD), VC, (join, VB)
    (join, VB), OBJ, (board, NN)
    (board, NN), NMOD, (the, DT)
    (join, VB), VMOD, (as, IN)
    (as, IN), PMOD, (director, NN)
    (director, NN), NMOD, (a, DT)
    (director, NN), NMOD, (nonexecutive, JJ)
    (join, VB), VMOD, (Nov., NNP)
    (Nov., NNP), NMOD, (29, CD)
    (join, VB), VMOD, (., .)
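
A ``DependencyGraph`` also offers a few structural checks; for
instance, ``contains_cycle()`` returns the offending cycle if the head
annotations loop, and ``False`` for a well-formed graph (a quick check
on the graph built above):

    >>> dg.contains_cycle()
    False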

Using a custom cell extractor:

    >>> def custom_extractor(cells):
    ...     _, tag, head, rel = cells
    ...     return 'spam', 'spam', tag, tag, '', head, rel
    >>> dg = DependencyGraph(treebank_data, cell_extractor=custom_extractor)
    >>> dg.tree().pprint()
    (spam
      (spam spam spam (spam (spam spam)) spam)
      (spam (spam spam) (spam (spam spam spam)) (spam spam) spam))

Custom cell extractors can take in and return an index:

    >>> def custom_extractor(cells, index):
    ...     word, tag, head, rel = cells
    ...     return (index, '{}-{}'.format(word, index), word,
    ...             tag, tag, '', head, rel)
    >>> dg = DependencyGraph(treebank_data, cell_extractor=custom_extractor)
    >>> dg.tree().pprint()
    (will-8
      (Vinken-2 Pierre-1 ,-3 (old-6 (years-5 61-4)) ,-7)
      (join-9
        (board-11 the-10)
        (as-12 (director-15 a-13 nonexecutive-14))
        (Nov.-16 29-17)
        .-18))

Using the dependency-parsed version of the Penn Treebank corpus sample:

    >>> from nltk.corpus import dependency_treebank
    >>> t = dependency_treebank.parsed_sents()[0]
    >>> print(t.to_conll(3))
    Pierre  NNP     2
    Vinken  NNP     8
    ,       ,       2
    61      CD      5
    years   NNS     6
    old     JJ      2
    ,       ,       2
    will    MD      0
    join    VB      8
    the     DT      11
    board   NN      9
    as      IN      9
    a       DT      15
    nonexecutive    JJ      15
    director        NN      12
    Nov.    NNP     9
    29      CD      16
    .       .       8

Using the output of zpar (like Malt-TAB but with zero-based indexing):

    >>> zpar_data = """
    ... Pierre  NNP     1       NMOD
    ... Vinken  NNP     7       SUB
    ... ,       ,       1       P
    ... 61      CD      4       NMOD
    ... years   NNS     5       AMOD
    ... old     JJ      1       NMOD
    ... ,       ,       1       P
    ... will    MD      -1      ROOT
    ... join    VB      7       VC
    ... the     DT      10      NMOD
    ... board   NN      8       OBJ
    ... as      IN      8       VMOD
    ... a       DT      14      NMOD
    ... nonexecutive    JJ      14      NMOD
    ... director        NN      11      PMOD
    ... Nov.    NNP     8       VMOD
    ... 29      CD      15      NMOD
    ... .       .       7       P
    ... """

    >>> zdg = DependencyGraph(zpar_data, zero_based=True)
    >>> print(zdg.tree())
    (will
      (Vinken Pierre , (old (years 61)) ,)
      (join (board the) (as (director a nonexecutive)) (Nov. 29))
      .)

Projective Dependency Parsing
-----------------------------

    >>> grammar = DependencyGrammar.fromstring("""
    ... 'fell' -> 'price' | 'stock'
    ... 'price' -> 'of' 'the'
    ... 'of' -> 'stock'
    ... 'stock' -> 'the'
    ... """)
    >>> print(grammar)
    Dependency grammar with 5 productions
      'fell' -> 'price'
      'fell' -> 'stock'
      'price' -> 'of' 'the'
      'of' -> 'stock'
      'stock' -> 'the'

    >>> dp = ProjectiveDependencyParser(grammar)
    >>> for t in sorted(dp.parse(['the', 'price', 'of', 'the', 'stock', 'fell'])):
    ...     print(t)
    (fell (price the (of (stock the))))
    (fell (price the of) (stock the))
    (fell (price the of the) stock)
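
The same parser handles any word sequence covered by the grammar's
lexicon; a shorter input drawn from the same productions gets exactly
one analysis (a quick check, assuming the grammar defined above):

    >>> for t in dp.parse(['the', 'stock', 'fell']):
    ...     print(t)
    (fell (stock the))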

Non-Projective Dependency Parsing
---------------------------------

    >>> grammar = DependencyGrammar.fromstring("""
    ... 'taught' -> 'play' | 'man'
    ... 'man' -> 'the'
    ... 'play' -> 'golf' | 'dog' | 'to'
    ... 'dog' -> 'his'
    ... """)
    >>> print(grammar)
    Dependency grammar with 7 productions
      'taught' -> 'play'
      'taught' -> 'man'
      'man' -> 'the'
      'play' -> 'golf'
      'play' -> 'dog'
      'play' -> 'to'
      'dog' -> 'his'

    >>> dp = NonprojectiveDependencyParser(grammar)
    >>> g, = dp.parse(['the', 'man', 'taught', 'his', 'dog', 'to', 'play', 'golf'])

    >>> print(g.root['word'])
    taught

    >>> for _, node in sorted(g.nodes.items()):
    ...     if node['word'] is not None:
    ...         print('{address} {word}: {d}'.format(d=node['deps'][''], **node))
    1 the: []
    2 man: [1]
    3 taught: [2, 7]
    4 his: []
    5 dog: [4]
    6 to: []
    7 play: [5, 6, 8]
    8 golf: []

    >>> print(g.tree())
    (taught (man the) (play (dog his) to golf))

Integration with MALT parser
============================

If the top relation label differs from the default, it can be set
explicitly; for output from the MALT parser, it is ``'null'``.

    >>> dg_str = """1   I       _   NN  NN  _   2   nn      _   _
    ... 2   shot        _   NN  NN  _   0   null    _   _
    ... 3   an          _   AT  AT  _   2   dep     _   _
    ... 4   elephant    _   NN  NN  _   7   nn      _   _
    ... 5   in          _   NN  NN  _   7   nn      _   _
    ... 6   my          _   NN  NN  _   7   nn      _   _
    ... 7   pajamas     _   NNS NNS _   3   dobj    _   _
    ... """
    >>> dg = DependencyGraph(dg_str, top_relation_label='null')

    >>> len(dg.nodes)
    8

    >>> dg.root['word'], dg.root['address']
    ('shot', 2)

    >>> print(dg.to_conll(10))
    1   I       _   NN  NN  _   2   nn      _   _
    2   shot        _   NN  NN  _   0   null    _   _
    3   an          _   AT  AT  _   2   dep     _   _
    4   elephant    _   NN  NN  _   7   nn      _   _
    5   in          _   NN  NN  _   7   nn      _   _
    6   my          _   NN  NN  _   7   nn      _   _
    7   pajamas     _   NNS NNS _   3   dobj    _   _
venv/lib/python3.10/site-packages/nltk/test/discourse.doctest
ADDED
@@ -0,0 +1,552 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

==================
Discourse Checking
==================

    >>> from nltk import *
    >>> from nltk.sem import logic
    >>> logic._counter._value = 0

Setup
=====

    >>> from nltk.test.childes_fixt import setup_module
    >>> setup_module()

Introduction
============

The NLTK discourse module makes it possible to test consistency and
redundancy of simple discourses, using theorem-proving and
model-building from `nltk.inference`.

The ``DiscourseTester`` constructor takes a list of sentences as a
parameter.

    >>> dt = DiscourseTester(['a boxer walks', 'every boxer chases a girl'])

The ``DiscourseTester`` parses each sentence into a list of logical
forms.  Once we have created a ``DiscourseTester`` object, we can
inspect various properties of the discourse.  First off, we might want
to double-check what sentences are currently stored as the discourse.

    >>> dt.sentences()
    s0: a boxer walks
    s1: every boxer chases a girl

As you will see, each sentence receives an identifier `s`\ :subscript:`i`.
We might also want to check what grammar the ``DiscourseTester`` is
using (by default, ``book_grammars/discourse.fcfg``):

    >>> dt.grammar()
    % start S
    # Grammar Rules
    S[SEM = <app(?subj,?vp)>] -> NP[NUM=?n,SEM=?subj] VP[NUM=?n,SEM=?vp]
    NP[NUM=?n,SEM=<app(?det,?nom)> ] -> Det[NUM=?n,SEM=?det] Nom[NUM=?n,SEM=?nom]
    NP[LOC=?l,NUM=?n,SEM=?np] -> PropN[LOC=?l,NUM=?n,SEM=?np]
    ...

A different grammar can be invoked by using the optional ``gramfile``
parameter when a ``DiscourseTester`` object is created.

Readings and Threads
====================

Depending on the grammar used, we may find some sentences have more
than one logical form.  To check this, use the ``readings()`` method.
Given a sentence identifier of the form `s`\ :subscript:`i`, each reading of
that sentence is given an identifier `s`\ :sub:`i`-`r`\ :sub:`j`.

    >>> dt.readings()
    <BLANKLINE>
    s0 readings:
    <BLANKLINE>
    s0-r0: exists z1.(boxer(z1) & walk(z1))
    s0-r1: exists z1.(boxerdog(z1) & walk(z1))
    <BLANKLINE>
    s1 readings:
    <BLANKLINE>
    s1-r0: all z2.(boxer(z2) -> exists z3.(girl(z3) & chase(z2,z3)))
    s1-r1: all z1.(boxerdog(z1) -> exists z2.(girl(z2) & chase(z1,z2)))

In this case, the only source of ambiguity lies in the word *boxer*,
which receives two translations: ``boxer`` and ``boxerdog``.  The
intention is that one of these corresponds to the ``person`` sense and
one to the ``dog`` sense.  In principle, we would also expect to see a
quantifier scope ambiguity in ``s1``.  However, the simple grammar we
are using doesn't support quantifier scope ambiguity.

We can also investigate the readings of a specific sentence:

    >>> dt.readings('a boxer walks')
    The sentence 'a boxer walks' has these readings:
        exists x.(boxer(x) & walk(x))
        exists x.(boxerdog(x) & walk(x))

Given that each sentence is two-ways ambiguous, we potentially have
four different discourse 'threads', taking all combinations of
readings.  To see these, specify the ``threaded=True`` parameter on
the ``readings()`` method.  Again, each thread is assigned an
identifier of the form `d`\ :sub:`i`.  Following the identifier is a
list of the readings that constitute that thread.

    >>> dt.readings(threaded=True)
    d0: ['s0-r0', 's1-r0']
    d1: ['s0-r0', 's1-r1']
    d2: ['s0-r1', 's1-r0']
    d3: ['s0-r1', 's1-r1']

Of course, this simple-minded approach doesn't scale: a discourse with,
say, three sentences, each of which has 3 readings, will generate 27
different threads.  It is an interesting exercise to consider how to
manage discourse ambiguity more efficiently.
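
A quick sanity check on that arithmetic (plain Python, nothing
discourse-specific): the number of threads is just the product of the
number of readings per sentence.

    >>> from math import prod
    >>> prod([3, 3, 3])
    27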

Checking Consistency
====================

Now, we can check whether some or all of the discourse threads are
consistent, using the ``models()`` method.  With no parameter, this
method will try to find a model for every discourse thread in the
current discourse.  However, we can also specify just one thread, say ``d1``.

    >>> dt.models('d1')
    --------------------------------------------------------------------------------
    Model for Discourse Thread d1
    --------------------------------------------------------------------------------
    % number = 1
    % seconds = 0
    <BLANKLINE>
    % Interpretation of size 2
    <BLANKLINE>
    c1 = 0.
    <BLANKLINE>
    f1(0) = 0.
    f1(1) = 0.
    <BLANKLINE>
    boxer(0).
    - boxer(1).
    <BLANKLINE>
    - boxerdog(0).
    - boxerdog(1).
    <BLANKLINE>
    - girl(0).
    - girl(1).
    <BLANKLINE>
    walk(0).
    - walk(1).
    <BLANKLINE>
    - chase(0,0).
    - chase(0,1).
    - chase(1,0).
    - chase(1,1).
    <BLANKLINE>
    Consistent discourse: d1 ['s0-r0', 's1-r1']:
        s0-r0: exists z1.(boxer(z1) & walk(z1))
        s1-r1: all z1.(boxerdog(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
    <BLANKLINE>

There are various formats for rendering **Mace4** models --- here,
we have used the 'cooked' format (which is intended to be
human-readable).  There are a number of points to note.

#. The entities in the domain are all treated as non-negative
   integers.  In this case, there are only two entities, ``0`` and
   ``1``.

#. The ``-`` symbol indicates negation.  So ``0`` is the only
   ``boxerdog`` and the only thing that ``walk``\ s.  Nothing is a
   ``boxer``, or a ``girl`` or in the ``chase`` relation.  Thus the
   universal sentence is vacuously true.

#. ``c1`` is an introduced constant that denotes ``0``.

#. ``f1`` is a Skolem function, but it plays no significant role in
   this model.

We might now want to add another sentence to the discourse, and there
is a method ``add_sentence()`` for doing just this.

    >>> dt.add_sentence('John is a boxer')
    >>> dt.sentences()
    s0: a boxer walks
    s1: every boxer chases a girl
    s2: John is a boxer

We can now test all the properties as before; here, we just show a
couple of them.

    >>> dt.readings()
    <BLANKLINE>
    s0 readings:
    <BLANKLINE>
    s0-r0: exists z1.(boxer(z1) & walk(z1))
    s0-r1: exists z1.(boxerdog(z1) & walk(z1))
    <BLANKLINE>
    s1 readings:
    <BLANKLINE>
    s1-r0: all z1.(boxer(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
    s1-r1: all z1.(boxerdog(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
    <BLANKLINE>
    s2 readings:
    <BLANKLINE>
    s2-r0: boxer(John)
    s2-r1: boxerdog(John)
    >>> dt.readings(threaded=True)
    d0: ['s0-r0', 's1-r0', 's2-r0']
    d1: ['s0-r0', 's1-r0', 's2-r1']
    d2: ['s0-r0', 's1-r1', 's2-r0']
    d3: ['s0-r0', 's1-r1', 's2-r1']
    d4: ['s0-r1', 's1-r0', 's2-r0']
    d5: ['s0-r1', 's1-r0', 's2-r1']
    d6: ['s0-r1', 's1-r1', 's2-r0']
    d7: ['s0-r1', 's1-r1', 's2-r1']

If you are interested in a particular thread, the ``expand_threads()``
method will remind you of what readings it consists of:

    >>> thread = dt.expand_threads('d1')
    >>> for rid, reading in thread:
    ...     print(rid, str(reading.normalize()))
    s0-r0 exists z1.(boxer(z1) & walk(z1))
    s1-r0 all z1.(boxer(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
    s2-r1 boxerdog(John)

Suppose we have already defined a discourse, as follows:

    >>> dt = DiscourseTester(['A student dances', 'Every student is a person'])

Now, when we add a new sentence, is it consistent with what we already
have?  The ``consistchk=True`` parameter of ``add_sentence()`` allows
us to check:

    >>> dt.add_sentence('No person dances', consistchk=True)
    Inconsistent discourse: d0 ['s0-r0', 's1-r0', 's2-r0']:
        s0-r0: exists z1.(student(z1) & dance(z1))
        s1-r0: all z1.(student(z1) -> person(z1))
        s2-r0: -exists z1.(person(z1) & dance(z1))
    <BLANKLINE>
    >>> dt.readings()
    <BLANKLINE>
    s0 readings:
    <BLANKLINE>
    s0-r0: exists z1.(student(z1) & dance(z1))
    <BLANKLINE>
    s1 readings:
    <BLANKLINE>
    s1-r0: all z1.(student(z1) -> person(z1))
    <BLANKLINE>
    s2 readings:
    <BLANKLINE>
    s2-r0: -exists z1.(person(z1) & dance(z1))

So let's retract the inconsistent sentence:

    >>> dt.retract_sentence('No person dances', verbose=True)
    Current sentences are
    s0: A student dances
    s1: Every student is a person

We can now verify that the result is consistent.

    >>> dt.models()
    --------------------------------------------------------------------------------
    Model for Discourse Thread d0
    --------------------------------------------------------------------------------
    % number = 1
    % seconds = 0
    <BLANKLINE>
    % Interpretation of size 2
    <BLANKLINE>
    c1 = 0.
    <BLANKLINE>
    dance(0).
    - dance(1).
    <BLANKLINE>
    person(0).
    - person(1).
    <BLANKLINE>
    student(0).
    - student(1).
    <BLANKLINE>
    Consistent discourse: d0 ['s0-r0', 's1-r0']:
        s0-r0: exists z1.(student(z1) & dance(z1))
        s1-r0: all z1.(student(z1) -> person(z1))
    <BLANKLINE>

Checking Informativity
======================

Let's assume that we are still trying to extend the discourse *A
student dances.* *Every student is a person.*  We add a new sentence,
but this time, we check whether it is informative with respect to what
has gone before.

    >>> dt.add_sentence('A person dances', informchk=True)
    Sentence 'A person dances' under reading 'exists x.(person(x) & dance(x))':
    Not informative relative to thread 'd0'

In fact, we are just checking whether the new sentence is entailed by
the preceding discourse.

    >>> dt.models()
    --------------------------------------------------------------------------------
    Model for Discourse Thread d0
    --------------------------------------------------------------------------------
    % number = 1
    % seconds = 0
    <BLANKLINE>
    % Interpretation of size 2
    <BLANKLINE>
    c1 = 0.
    <BLANKLINE>
    c2 = 0.
    <BLANKLINE>
    dance(0).
    - dance(1).
    <BLANKLINE>
    person(0).
    - person(1).
    <BLANKLINE>
    student(0).
    - student(1).
    <BLANKLINE>
    Consistent discourse: d0 ['s0-r0', 's1-r0', 's2-r0']:
        s0-r0: exists z1.(student(z1) & dance(z1))
        s1-r0: all z1.(student(z1) -> person(z1))
        s2-r0: exists z1.(person(z1) & dance(z1))
    <BLANKLINE>

Adding Background Knowledge
===========================

Let's build a new discourse, and look at the readings of the component sentences:

    >>> dt = DiscourseTester(['Vincent is a boxer', 'Fido is a boxer', 'Vincent is married', 'Fido barks'])
    >>> dt.readings()
    <BLANKLINE>
    s0 readings:
    <BLANKLINE>
    s0-r0: boxer(Vincent)
    s0-r1: boxerdog(Vincent)
    <BLANKLINE>
    s1 readings:
    <BLANKLINE>
    s1-r0: boxer(Fido)
    s1-r1: boxerdog(Fido)
    <BLANKLINE>
    s2 readings:
    <BLANKLINE>
    s2-r0: married(Vincent)
    <BLANKLINE>
    s3 readings:
    <BLANKLINE>
    s3-r0: bark(Fido)

This gives us a lot of threads:

    >>> dt.readings(threaded=True)
    d0: ['s0-r0', 's1-r0', 's2-r0', 's3-r0']
    d1: ['s0-r0', 's1-r1', 's2-r0', 's3-r0']
    d2: ['s0-r1', 's1-r0', 's2-r0', 's3-r0']
    d3: ['s0-r1', 's1-r1', 's2-r0', 's3-r0']

We can eliminate some of the readings, and hence some of the threads,
by adding background information.

    >>> import nltk.data
    >>> bg = nltk.data.load('grammars/book_grammars/background.fol')
    >>> dt.add_background(bg)
    >>> dt.background()
    all x.(boxerdog(x) -> dog(x))
    all x.(boxer(x) -> person(x))
    all x.-(dog(x) & person(x))
    all x.(married(x) <-> exists y.marry(x,y))
    all x.(bark(x) -> dog(x))
    all x y.(marry(x,y) -> (person(x) & person(y)))
    -(Vincent = Mia)
    -(Vincent = Fido)
    -(Mia = Fido)

The background information allows us to reject three of the threads as
inconsistent.  To see what remains, use the ``filter=True`` parameter
on ``readings()``.

    >>> dt.readings(filter=True)
    d1: ['s0-r0', 's1-r1', 's2-r0', 's3-r0']

The ``models()`` method gives us more information about the surviving thread.

    >>> dt.models()
    --------------------------------------------------------------------------------
    Model for Discourse Thread d0
    --------------------------------------------------------------------------------
    No model found!
    <BLANKLINE>
    --------------------------------------------------------------------------------
    Model for Discourse Thread d1
    --------------------------------------------------------------------------------
    % number = 1
    % seconds = 0
    <BLANKLINE>
    % Interpretation of size 3
    <BLANKLINE>
    Fido = 0.
    <BLANKLINE>
    Mia = 1.
    <BLANKLINE>
    Vincent = 2.
    <BLANKLINE>
    f1(0) = 0.
    f1(1) = 0.
    f1(2) = 2.
    <BLANKLINE>
    bark(0).
    - bark(1).
    - bark(2).
    <BLANKLINE>
    - boxer(0).
    - boxer(1).
    boxer(2).
    <BLANKLINE>
    boxerdog(0).
    - boxerdog(1).
    - boxerdog(2).
    <BLANKLINE>
    dog(0).
    - dog(1).
    - dog(2).
    <BLANKLINE>
    - married(0).
    - married(1).
    married(2).
    <BLANKLINE>
    - person(0).
    - person(1).
    person(2).
    <BLANKLINE>
    - marry(0,0).
    - marry(0,1).
    - marry(0,2).
    - marry(1,0).
    - marry(1,1).
    - marry(1,2).
    - marry(2,0).
    - marry(2,1).
    marry(2,2).
    <BLANKLINE>
    --------------------------------------------------------------------------------
    Model for Discourse Thread d2
    --------------------------------------------------------------------------------
    No model found!
    <BLANKLINE>
    --------------------------------------------------------------------------------
    Model for Discourse Thread d3
    --------------------------------------------------------------------------------
    No model found!
    <BLANKLINE>
    Inconsistent discourse: d0 ['s0-r0', 's1-r0', 's2-r0', 's3-r0']:
        s0-r0: boxer(Vincent)
        s1-r0: boxer(Fido)
        s2-r0: married(Vincent)
        s3-r0: bark(Fido)
    <BLANKLINE>
    Consistent discourse: d1 ['s0-r0', 's1-r1', 's2-r0', 's3-r0']:
        s0-r0: boxer(Vincent)
        s1-r1: boxerdog(Fido)
        s2-r0: married(Vincent)
        s3-r0: bark(Fido)
    <BLANKLINE>
    Inconsistent discourse: d2 ['s0-r1', 's1-r0', 's2-r0', 's3-r0']:
        s0-r1: boxerdog(Vincent)
        s1-r0: boxer(Fido)
        s2-r0: married(Vincent)
        s3-r0: bark(Fido)
    <BLANKLINE>
    Inconsistent discourse: d3 ['s0-r1', 's1-r1', 's2-r0', 's3-r0']:
        s0-r1: boxerdog(Vincent)
        s1-r1: boxerdog(Fido)
        s2-r0: married(Vincent)
        s3-r0: bark(Fido)
    <BLANKLINE>

.. This will not be visible in the html output: create a tempdir to
   play in.

    >>> import tempfile, os
    >>> tempdir = tempfile.mkdtemp()
    >>> old_dir = os.path.abspath('.')
    >>> os.chdir(tempdir)

In order to play around with your own version of background knowledge,
you might want to start off with a local copy of ``background.fol``:

    >>> nltk.data.retrieve('grammars/book_grammars/background.fol')
    Retrieving 'nltk:grammars/book_grammars/background.fol', saving to 'background.fol'

After you have modified the file, the ``load_fol()`` function will parse
the strings in the file into expressions of ``nltk.sem.logic``.

    >>> from nltk.inference.discourse import load_fol
    >>> mybg = load_fol(open('background.fol').read())

The result can be passed as an argument to ``add_background()`` in the
manner shown earlier.
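For instance, re-using the ``DiscourseTester`` from above
(``add_background()`` simply extends the stored background assumptions,
so this produces no output):

    >>> dt.add_background(mybg)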

.. This will not be visible in the html output: clean up the tempdir.

    >>> os.chdir(old_dir)
    >>> for f in os.listdir(tempdir):
    ...     os.remove(os.path.join(tempdir, f))
    >>> os.rmdir(tempdir)
    >>> nltk.data.clear_cache()

Regression Testing from book
============================

    >>> logic._counter._value = 0

    >>> from nltk.tag import RegexpTagger
    >>> tagger = RegexpTagger(
    ...     [('^(chases|runs)$', 'VB'),
    ...      ('^(a)$', 'ex_quant'),
    ...      ('^(every)$', 'univ_quant'),
    ...      ('^(dog|boy)$', 'NN'),
    ...      ('^(He)$', 'PRP')
    ...     ])
    >>> rc = DrtGlueReadingCommand(depparser=MaltParser(tagger=tagger))
    >>> dt = DiscourseTester(map(str.split, ['Every dog chases a boy', 'He runs']), rc)
    >>> dt.readings()
    <BLANKLINE>
    s0 readings:
    <BLANKLINE>
    s0-r0: ([z2],[boy(z2), (([z5],[dog(z5)]) -> ([],[chases(z5,z2)]))])
    s0-r1: ([],[(([z1],[dog(z1)]) -> ([z2],[boy(z2), chases(z1,z2)]))])
    <BLANKLINE>
    s1 readings:
    <BLANKLINE>
    s1-r0: ([z1],[PRO(z1), runs(z1)])
    >>> dt.readings(show_thread_readings=True)
    d0: ['s0-r0', 's1-r0'] : ([z1,z2],[boy(z1), (([z3],[dog(z3)]) -> ([],[chases(z3,z1)])), (z2 = z1), runs(z2)])
    d1: ['s0-r1', 's1-r0'] : INVALID: AnaphoraResolutionException
    >>> dt.readings(filter=True, show_thread_readings=True)
    d0: ['s0-r0', 's1-r0'] : ([z1,z3],[boy(z1), (([z2],[dog(z2)]) -> ([],[chases(z2,z1)])), (z3 = z1), runs(z3)])

    >>> logic._counter._value = 0

    >>> from nltk.parse import FeatureEarleyChartParser
    >>> from nltk.sem.drt import DrtParser
    >>> grammar = nltk.data.load('grammars/book_grammars/drt.fcfg', logic_parser=DrtParser())
    >>> parser = FeatureEarleyChartParser(grammar, trace=0)
    >>> trees = parser.parse('Angus owns a dog'.split())
    >>> print(list(trees)[0].label()['SEM'].simplify().normalize())
    ([z1,z2],[Angus(z1), dog(z2), own(z1,z2)])
venv/lib/python3.10/site-packages/nltk/test/drt.doctest
ADDED
@@ -0,0 +1,515 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

================================
Discourse Representation Theory
================================

    >>> from nltk.sem import logic
    >>> from nltk.inference import TableauProver

Overview
========

A DRS can be created with the ``DRS()`` constructor.  This takes two
arguments: a list of discourse referents and a list of conditions.

    >>> from nltk.sem.drt import *
    >>> dexpr = DrtExpression.fromstring
    >>> man_x = dexpr('man(x)')
    >>> walk_x = dexpr('walk(x)')
    >>> x = dexpr('x')
    >>> print(DRS([x], [man_x, walk_x]))
    ([x],[man(x), walk(x)])

The ``parse()`` method can also be applied directly to DRS
expressions, which allows them to be specified more easily.

    >>> drs1 = dexpr('([x],[man(x),walk(x)])')
    >>> print(drs1)
    ([x],[man(x), walk(x)])

DRSs can be *merged* using the ``+`` operator.

    >>> drs2 = dexpr('([y],[woman(y),stop(y)])')
    >>> drs3 = drs1 + drs2
    >>> print(drs3)
    (([x],[man(x), walk(x)]) + ([y],[woman(y), stop(y)]))
    >>> print(drs3.simplify())
    ([x,y],[man(x), walk(x), woman(y), stop(y)])

We can embed DRSs as components of an ``implies`` condition.

    >>> s = '([], [(%s -> %s)])' % (drs1, drs2)
    >>> print(dexpr(s))
    ([],[(([x],[man(x), walk(x)]) -> ([y],[woman(y), stop(y)]))])

The ``fol()`` method converts DRSs into FOL formulae.

    >>> print(dexpr(r'([x],[man(x), walks(x)])').fol())
    exists x.(man(x) & walks(x))
    >>> print(dexpr(r'([],[(([x],[man(x)]) -> ([],[walks(x)]))])').fol())
    all x.(man(x) -> walks(x))
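
Since ``fol()`` returns ordinary ``nltk.sem.logic`` expressions, its
output can be handed straight to the provers in ``nltk.inference``.  As
a quick illustrative check with the ``TableauProver`` imported above,
the weaker existential should follow from the first formula:

    >>> goal = dexpr(r'([x],[walks(x)])').fol()
    >>> TableauProver().prove(goal, [dexpr(r'([x],[man(x), walks(x)])').fol()])
    True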
|
54 |
+
|
55 |
+
In order to visualize a DRS, the ``pretty_format()`` method can be used.
|
56 |
+
|
57 |
+
>>> print(drs3.pretty_format())
|
58 |
+
_________ __________
|
59 |
+
| x | | y |
|
60 |
+
(|---------| + |----------|)
|
61 |
+
| man(x) | | woman(y) |
|
62 |
+
| walk(x) | | stop(y) |
|
63 |
+
|_________| |__________|
|
64 |
+
|
65 |
+
|
66 |
+
Parse to semantics
|
67 |
+
------------------
|
68 |
+
|
69 |
+
..
|
70 |
+
>>> logic._counter._value = 0
|
71 |
+
|
72 |
+
DRSs can be used for building compositional semantics in a feature
|
73 |
+
based grammar. To specify that we want to use DRSs, the appropriate
|
74 |
+
logic parser needs be passed as a parameter to ``load_earley()``
|
75 |
+
|
76 |
+
>>> from nltk.parse import load_parser
|
77 |
+
>>> from nltk.sem.drt import DrtParser
|
78 |
+
>>> parser = load_parser('grammars/book_grammars/drt.fcfg', trace=0, logic_parser=DrtParser())
|
79 |
+
>>> for tree in parser.parse('a dog barks'.split()):
|
80 |
+
... print(tree.label()['SEM'].simplify())
|
81 |
+
...
|
82 |
+
([x],[dog(x), bark(x)])
|
83 |
+
|
84 |
+
Alternatively, a ``FeatStructReader`` can be passed with the ``logic_parser`` set on it
|
85 |
+
|
86 |
+
>>> from nltk.featstruct import FeatStructReader
|
87 |
+
>>> from nltk.grammar import FeatStructNonterminal
|
88 |
+
>>> parser = load_parser('grammars/book_grammars/drt.fcfg', trace=0, fstruct_reader=FeatStructReader(fdict_class=FeatStructNonterminal, logic_parser=DrtParser()))
|
89 |
+
>>> for tree in parser.parse('every girl chases a dog'.split()):
|
90 |
+
... print(tree.label()['SEM'].simplify().normalize())
|
91 |
+
...
|
92 |
+
([],[(([z1],[girl(z1)]) -> ([z2],[dog(z2), chase(z1,z2)]))])
|
93 |
+
|
94 |
+
|
95 |
+
|
96 |
+
Unit Tests
|
97 |
+
==========
|
98 |
+
|
99 |
+
Parser
|
100 |
+
------
|
101 |
+
|
102 |
+
>>> print(dexpr(r'([x,y],[sees(x,y)])'))
|
103 |
+
([x,y],[sees(x,y)])
|
104 |
+
>>> print(dexpr(r'([x],[man(x), walks(x)])'))
|
105 |
+
([x],[man(x), walks(x)])
|
106 |
+
>>> print(dexpr(r'\x.([],[man(x), walks(x)])'))
|
107 |
+
\x.([],[man(x), walks(x)])
|
108 |
+
>>> print(dexpr(r'\x.\y.([],[sees(x,y)])'))
|
109 |
+
\x y.([],[sees(x,y)])
|
110 |
+
|
111 |
+
>>> print(dexpr(r'([x,y],[(x = y)])'))
|
112 |
+
([x,y],[(x = y)])
|
113 |
+
>>> print(dexpr(r'([x,y],[(x != y)])'))
|
114 |
+
([x,y],[-(x = y)])
|
115 |
+
|
116 |
+
>>> print(dexpr(r'\x.([],[walks(x)])(john)'))
|
117 |
+
(\x.([],[walks(x)]))(john)
|
118 |
+
>>> print(dexpr(r'\R.\x.([],[big(x,R)])(\y.([],[mouse(y)]))'))
|
119 |
+
(\R x.([],[big(x,R)]))(\y.([],[mouse(y)]))
|
120 |
+
|
121 |
+
>>> print(dexpr(r'(([x],[walks(x)]) + ([y],[runs(y)]))'))
|
122 |
+
(([x],[walks(x)]) + ([y],[runs(y)]))
|
123 |
+
>>> print(dexpr(r'(([x,y],[walks(x), jumps(y)]) + (([z],[twos(z)]) + ([w],[runs(w)])))'))
|
124 |
+
(([x,y],[walks(x), jumps(y)]) + ([z],[twos(z)]) + ([w],[runs(w)]))
|
125 |
+
>>> print(dexpr(r'((([],[walks(x)]) + ([],[twos(x)])) + ([],[runs(x)]))'))
|
126 |
+
(([],[walks(x)]) + ([],[twos(x)]) + ([],[runs(x)]))
|
127 |
+
>>> print(dexpr(r'((([],[walks(x)]) + ([],[runs(x)])) + (([],[threes(x)]) + ([],[fours(x)])))'))
|
128 |
+
(([],[walks(x)]) + ([],[runs(x)]) + ([],[threes(x)]) + ([],[fours(x)]))
|
129 |
+
|
130 |
+
>>> print(dexpr(r'(([],[walks(x)]) -> ([],[runs(x)]))'))
|
131 |
+
(([],[walks(x)]) -> ([],[runs(x)]))
|
132 |
+
|
133 |
+
>>> print(dexpr(r'([x],[PRO(x), sees(John,x)])'))
|
134 |
+
([x],[PRO(x), sees(John,x)])
|
135 |
+
>>> print(dexpr(r'([x],[man(x), -([],[walks(x)])])'))
|
136 |
+
([x],[man(x), -([],[walks(x)])])
|
137 |
+
>>> print(dexpr(r'([],[(([x],[man(x)]) -> ([],[walks(x)]))])'))
|
138 |
+
([],[(([x],[man(x)]) -> ([],[walks(x)]))])
|
139 |
+
|
140 |
+
>>> print(dexpr(r'DRS([x],[walk(x)])'))
|
141 |
+
([x],[walk(x)])
|
142 |
+
>>> print(dexpr(r'DRS([x][walk(x)])'))
|
143 |
+
([x],[walk(x)])
|
144 |
+
>>> print(dexpr(r'([x][walk(x)])'))
|
145 |
+
([x],[walk(x)])
|
146 |
+
|
147 |
+
``simplify()``
|
148 |
+
--------------
|
149 |
+
|
150 |
+
>>> print(dexpr(r'\x.([],[man(x), walks(x)])(john)').simplify())
|
151 |
+
([],[man(john), walks(john)])
|
152 |
+
>>> print(dexpr(r'\x.\y.([z],[dog(z),sees(x,y)])(john)(mary)').simplify())
|
153 |
+
([z],[dog(z), sees(john,mary)])
|
154 |
+
>>> print(dexpr(r'\R x.([],[big(x,R)])(\y.([],[mouse(y)]))').simplify())
|
155 |
+
\x.([],[big(x,\y.([],[mouse(y)]))])
|
156 |
+
|
157 |
+
>>> print(dexpr(r'(([x],[walks(x)]) + ([y],[runs(y)]))').simplify())
|
158 |
+
([x,y],[walks(x), runs(y)])
|
159 |
+
>>> print(dexpr(r'(([x,y],[walks(x), jumps(y)]) + (([z],[twos(z)]) + ([w],[runs(w)])))').simplify())
|
160 |
+
([w,x,y,z],[walks(x), jumps(y), twos(z), runs(w)])
|
161 |
+
>>> print(dexpr(r'((([],[walks(x)]) + ([],[runs(x)]) + ([],[threes(x)]) + ([],[fours(x)])))').simplify())
|
162 |
+
([],[walks(x), runs(x), threes(x), fours(x)])
|
163 |
+
>>> dexpr(r'([x],[man(x)])+([x],[walks(x)])').simplify() == \
|
164 |
+
... dexpr(r'([x,z1],[man(x), walks(z1)])')
|
165 |
+
True
|
166 |
+
>>> dexpr(r'([y],[boy(y), (([x],[dog(x)]) -> ([],[chase(x,y)]))])+([x],[run(x)])').simplify() == \
|
167 |
+
... dexpr(r'([y,z1],[boy(y), (([x],[dog(x)]) -> ([],[chase(x,y)])), run(z1)])')
|
168 |
+
True
|
169 |
+
|
170 |
+
>>> dexpr(r'\Q.(([x],[john(x),walks(x)]) + Q)(([x],[PRO(x),leaves(x)]))').simplify() == \
|
171 |
+
... dexpr(r'([x,z1],[john(x), walks(x), PRO(z1), leaves(z1)])')
|
172 |
+
True
|
173 |
+
|
174 |
+
>>> logic._counter._value = 0
|
175 |
+
>>> print(dexpr('([],[(([x],[dog(x)]) -> ([e,y],[boy(y), chase(e), subj(e,x), obj(e,y)]))])+([e,x],[PRO(x), run(e), subj(e,x)])').simplify().normalize().normalize())
|
176 |
+
([e02,z5],[(([z3],[dog(z3)]) -> ([e01,z4],[boy(z4), chase(e01), subj(e01,z3), obj(e01,z4)])), PRO(z5), run(e02), subj(e02,z5)])
|
177 |
+
|
178 |
+
``fol()``
|
179 |
+
-----------
|
180 |
+
|
181 |
+
>>> print(dexpr(r'([x,y],[sees(x,y)])').fol())
|
182 |
+
exists x y.sees(x,y)
|
183 |
+
>>> print(dexpr(r'([x],[man(x), walks(x)])').fol())
|
184 |
+
exists x.(man(x) & walks(x))
|
185 |
+
>>> print(dexpr(r'\x.([],[man(x), walks(x)])').fol())
|
186 |
+
\x.(man(x) & walks(x))
|
187 |
+
>>> print(dexpr(r'\x y.([],[sees(x,y)])').fol())
|
188 |
+
\x y.sees(x,y)
|
189 |
+
|
190 |
+
>>> print(dexpr(r'\x.([],[walks(x)])(john)').fol())
|
191 |
+
\x.walks(x)(john)
|
192 |
+
>>> print(dexpr(r'\R x.([],[big(x,R)])(\y.([],[mouse(y)]))').fol())
|
193 |
+
(\R x.big(x,R))(\y.mouse(y))
|
194 |
+
|
195 |
+
>>> print(dexpr(r'(([x],[walks(x)]) + ([y],[runs(y)]))').fol())
|
196 |
+
(exists x.walks(x) & exists y.runs(y))
|
197 |
+
|
198 |
+
>>> print(dexpr(r'(([],[walks(x)]) -> ([],[runs(x)]))').fol())
|
199 |
+
(walks(x) -> runs(x))
|
200 |
+
|
201 |
+
>>> print(dexpr(r'([x],[PRO(x), sees(John,x)])').fol())
|
202 |
+
exists x.(PRO(x) & sees(John,x))
|
203 |
+
>>> print(dexpr(r'([x],[man(x), -([],[walks(x)])])').fol())
|
204 |
+
exists x.(man(x) & -walks(x))
|
205 |
+
>>> print(dexpr(r'([],[(([x],[man(x)]) -> ([],[walks(x)]))])').fol())
|
206 |
+
all x.(man(x) -> walks(x))
|
207 |
+
|
208 |
+
>>> print(dexpr(r'([x],[man(x) | walks(x)])').fol())
|
209 |
+
exists x.(man(x) | walks(x))
|
210 |
+
>>> print(dexpr(r'P(x) + ([x],[walks(x)])').fol())
|
211 |
+
(P(x) & exists x.walks(x))
|
212 |
+
|
213 |
+
``resolve_anaphora()``
|
214 |
+
----------------------
|
215 |
+
|
216 |
+
>>> from nltk.sem.drt import AnaphoraResolutionException
|
217 |
+
|
218 |
+
>>> print(resolve_anaphora(dexpr(r'([x,y,z],[dog(x), cat(y), walks(z), PRO(z)])')))
|
219 |
+
([x,y,z],[dog(x), cat(y), walks(z), (z = [x,y])])
|
220 |
+
>>> print(resolve_anaphora(dexpr(r'([],[(([x],[dog(x)]) -> ([y],[walks(y), PRO(y)]))])')))
|
221 |
+
([],[(([x],[dog(x)]) -> ([y],[walks(y), (y = x)]))])
|
222 |
+
>>> print(resolve_anaphora(dexpr(r'(([x,y],[]) + ([],[PRO(x)]))')).simplify())
|
223 |
+
([x,y],[(x = y)])
|
224 |
+
>>> try: print(resolve_anaphora(dexpr(r'([x],[walks(x), PRO(x)])')))
|
225 |
+
... except AnaphoraResolutionException as e: print(e)
|
226 |
+
Variable 'x' does not resolve to anything.
|
227 |
+
>>> print(resolve_anaphora(dexpr('([e01,z6,z7],[boy(z6), PRO(z7), run(e01), subj(e01,z7)])')))
|
228 |
+
([e01,z6,z7],[boy(z6), (z7 = z6), run(e01), subj(e01,z7)])
|
229 |
+
|
230 |
+
``equiv()``:
|
231 |
+
----------------
|
232 |
+
|
233 |
+
>>> a = dexpr(r'([x],[man(x), walks(x)])')
|
234 |
+
>>> b = dexpr(r'([x],[walks(x), man(x)])')
|
235 |
+
>>> print(a.equiv(b, TableauProver()))
|
236 |
+
True
|
237 |
+
|
238 |
+
|
239 |
+
``replace()``:
|
240 |
+
--------------
|
241 |
+
|
242 |
+
>>> a = dexpr(r'a')
|
243 |
+
>>> w = dexpr(r'w')
|
244 |
+
>>> x = dexpr(r'x')
|
245 |
+
>>> y = dexpr(r'y')
|
246 |
+
>>> z = dexpr(r'z')
|
247 |
+
|
248 |
+
|
249 |
+
replace bound
|
250 |
+
-------------
|
251 |
+
|
252 |
+
>>> print(dexpr(r'([x],[give(x,y,z)])').replace(x.variable, a, False))
|
253 |
+
([x],[give(x,y,z)])
|
254 |
+
>>> print(dexpr(r'([x],[give(x,y,z)])').replace(x.variable, a, True))
|
255 |
+
([a],[give(a,y,z)])
|
256 |
+
|
257 |
+
replace unbound
|
258 |
+
---------------
|
259 |
+
|
260 |
+
>>> print(dexpr(r'([x],[give(x,y,z)])').replace(y.variable, a, False))
|
261 |
+
([x],[give(x,a,z)])
|
262 |
+
>>> print(dexpr(r'([x],[give(x,y,z)])').replace(y.variable, a, True))
|
263 |
+
([x],[give(x,a,z)])
|
264 |
+
|
265 |
+
replace unbound with bound
|
266 |
+
--------------------------
|
267 |
+
|
268 |
+
>>> dexpr(r'([x],[give(x,y,z)])').replace(y.variable, x, False) == \
|
269 |
+
... dexpr('([z1],[give(z1,x,z)])')
|
270 |
+
True
|
271 |
+
>>> dexpr(r'([x],[give(x,y,z)])').replace(y.variable, x, True) == \
|
272 |
+
... dexpr('([z1],[give(z1,x,z)])')
|
273 |
+
True
|
274 |
+
|
275 |
+
replace unbound with unbound
|
276 |
+
----------------------------
|
277 |
+
|
278 |
+
>>> print(dexpr(r'([x],[give(x,y,z)])').replace(y.variable, z, False))
|
279 |
+
([x],[give(x,z,z)])
|
280 |
+
>>> print(dexpr(r'([x],[give(x,y,z)])').replace(y.variable, z, True))
|
281 |
+
([x],[give(x,z,z)])
|
282 |
+
|
283 |
+
|
284 |
+
replace unbound
---------------

>>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(z.variable, a, False))
(([x],[P(x,y,a)]) + ([y],[Q(x,y,a)]))
>>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(z.variable, a, True))
(([x],[P(x,y,a)]) + ([y],[Q(x,y,a)]))

replace bound
-------------

>>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(x.variable, a, False))
(([x],[P(x,y,z)]) + ([y],[Q(x,y,z)]))
>>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(x.variable, a, True))
(([a],[P(a,y,z)]) + ([y],[Q(a,y,z)]))

replace unbound with unbound
----------------------------

>>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(z.variable, a, False))
(([x],[P(x,y,a)]) + ([y],[Q(x,y,a)]))
>>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(z.variable, a, True))
(([x],[P(x,y,a)]) + ([y],[Q(x,y,a)]))

replace unbound with bound on same side
---------------------------------------

>>> dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,w)])').replace(z.variable, x, False) == \
... dexpr(r'(([z1],[P(z1,y,x)]) + ([y],[Q(z1,y,w)]))')
True
>>> dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,w)])').replace(z.variable, x, True) == \
... dexpr(r'(([z1],[P(z1,y,x)]) + ([y],[Q(z1,y,w)]))')
True

replace unbound with bound on other side
----------------------------------------

>>> dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,w)])').replace(w.variable, x, False) == \
... dexpr(r'(([z1],[P(z1,y,z)]) + ([y],[Q(z1,y,x)]))')
True
>>> dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,w)])').replace(w.variable, x, True) == \
... dexpr(r'(([z1],[P(z1,y,z)]) + ([y],[Q(z1,y,x)]))')
True

replace unbound with double bound
---------------------------------

>>> dexpr(r'([x],[P(x,y,z)])+([x],[Q(x,y,w)])').replace(z.variable, x, False) == \
... dexpr(r'(([z1],[P(z1,y,x)]) + ([z1],[Q(z1,y,w)]))')
True
>>> dexpr(r'([x],[P(x,y,z)])+([x],[Q(x,y,w)])').replace(z.variable, x, True) == \
... dexpr(r'(([z1],[P(z1,y,x)]) + ([z1],[Q(z1,y,w)]))')
True


regression tests
----------------

>>> d = dexpr('([x],[A(c), ([y],[B(x,y,z,a)])->([z],[C(x,y,z,a)])])')
>>> print(d)
([x],[A(c), (([y],[B(x,y,z,a)]) -> ([z],[C(x,y,z,a)]))])
>>> print(d.pretty_format())
 ____________________________________
| x                                  |
|------------------------------------|
| A(c)                               |
|   ____________      ____________   |
|  | y          |    | z          |  |
| (|------------| -> |------------|) |
|  | B(x,y,z,a) |    | C(x,y,z,a) |  |
|  |____________|    |____________|  |
|____________________________________|
>>> print(str(d))
([x],[A(c), (([y],[B(x,y,z,a)]) -> ([z],[C(x,y,z,a)]))])
>>> print(d.fol())
exists x.(A(c) & all y.(B(x,y,z,a) -> exists z.C(x,y,z,a)))
>>> print(d.replace(Variable('a'), DrtVariableExpression(Variable('r'))))
([x],[A(c), (([y],[B(x,y,z,r)]) -> ([z],[C(x,y,z,r)]))])
>>> print(d.replace(Variable('x'), DrtVariableExpression(Variable('r'))))
([x],[A(c), (([y],[B(x,y,z,a)]) -> ([z],[C(x,y,z,a)]))])
>>> print(d.replace(Variable('y'), DrtVariableExpression(Variable('r'))))
([x],[A(c), (([y],[B(x,y,z,a)]) -> ([z],[C(x,y,z,a)]))])
>>> print(d.replace(Variable('z'), DrtVariableExpression(Variable('r'))))
([x],[A(c), (([y],[B(x,y,r,a)]) -> ([z],[C(x,y,z,a)]))])
>>> print(d.replace(Variable('x'), DrtVariableExpression(Variable('r')), True))
([r],[A(c), (([y],[B(r,y,z,a)]) -> ([z],[C(r,y,z,a)]))])
>>> print(d.replace(Variable('y'), DrtVariableExpression(Variable('r')), True))
([x],[A(c), (([r],[B(x,r,z,a)]) -> ([z],[C(x,r,z,a)]))])
>>> print(d.replace(Variable('z'), DrtVariableExpression(Variable('r')), True))
([x],[A(c), (([y],[B(x,y,r,a)]) -> ([r],[C(x,y,r,a)]))])
>>> print(d == dexpr('([l],[A(c), ([m],[B(l,m,z,a)])->([n],[C(l,m,n,a)])])'))
True
>>> d = dexpr('([],[([x,y],[B(x,y,h), ([a,b],[dee(x,a,g)])])->([z,w],[cee(x,y,f), ([c,d],[E(x,c,d,e)])])])')
>>> sorted(d.free())
[Variable('B'), Variable('E'), Variable('e'), Variable('f'), Variable('g'), Variable('h')]
>>> sorted(d.variables())
[Variable('B'), Variable('E'), Variable('e'), Variable('f'), Variable('g'), Variable('h')]
>>> sorted(d.get_refs(True))
[Variable('a'), Variable('b'), Variable('c'), Variable('d'), Variable('w'), Variable('x'), Variable('y'), Variable('z')]
>>> sorted(d.conds[0].get_refs(False))
[Variable('x'), Variable('y')]
>>> print(dexpr('([x,y],[A(x,y), (x=y), ([],[B(x,y)])->([],[C(x,y)]), ([x,y],[D(x,y)])->([],[E(x,y)]), ([],[F(x,y)])->([x,y],[G(x,y)])])').eliminate_equality())
([x],[A(x,x), (([],[B(x,x)]) -> ([],[C(x,x)])), (([x,y],[D(x,y)]) -> ([],[E(x,y)])), (([],[F(x,x)]) -> ([x,y],[G(x,y)]))])
>>> print(dexpr('([x,y],[A(x,y), (x=y)]) -> ([],[B(x,y)])').eliminate_equality())
(([x],[A(x,x)]) -> ([],[B(x,x)]))
>>> print(dexpr('([x,y],[A(x,y)]) -> ([],[B(x,y), (x=y)])').eliminate_equality())
(([x,y],[A(x,y)]) -> ([],[B(x,x)]))
>>> print(dexpr('([x,y],[A(x,y), (x=y), ([],[B(x,y)])])').eliminate_equality())
([x],[A(x,x), ([],[B(x,x)])])
>>> print(dexpr('([x,y],[A(x,y), ([],[B(x,y), (x=y)])])').eliminate_equality())
([x,y],[A(x,y), ([],[B(x,x)])])
>>> print(dexpr('([z8 z9 z10],[A(z8), z8=z10, z9=z10, B(z9), C(z10), D(z10)])').eliminate_equality())
([z9],[A(z9), B(z9), C(z9), D(z9)])

>>> print(dexpr('([x,y],[A(x,y), (x=y), ([],[B(x,y)]), ([x,y],[C(x,y)])])').eliminate_equality())
([x],[A(x,x), ([],[B(x,x)]), ([x,y],[C(x,y)])])
>>> print(dexpr('([x,y],[A(x,y)]) + ([],[B(x,y), (x=y)]) + ([],[C(x,y)])').eliminate_equality())
([x],[A(x,x), B(x,x), C(x,x)])
>>> print(dexpr('([x,y],[B(x,y)])+([x,y],[C(x,y)])').replace(Variable('y'), DrtVariableExpression(Variable('x'))))
(([x,y],[B(x,y)]) + ([x,y],[C(x,y)]))
>>> print(dexpr('(([x,y],[B(x,y)])+([],[C(x,y)]))+([],[D(x,y)])').replace(Variable('y'), DrtVariableExpression(Variable('x'))))
(([x,y],[B(x,y)]) + ([],[C(x,y)]) + ([],[D(x,y)]))
>>> print(dexpr('(([],[B(x,y)])+([],[C(x,y)]))+([],[D(x,y)])').replace(Variable('y'), DrtVariableExpression(Variable('x'))))
(([],[B(x,x)]) + ([],[C(x,x)]) + ([],[D(x,x)]))
>>> print(dexpr('(([],[B(x,y), ([x,y],[A(x,y)])])+([],[C(x,y)]))+([],[D(x,y)])').replace(Variable('y'), DrtVariableExpression(Variable('x'))).normalize())
(([],[B(z3,z1), ([z2,z3],[A(z3,z2)])]) + ([],[C(z3,z1)]) + ([],[D(z3,z1)]))


Parse errors
============

>>> def parse_error(drtstring):
...     try: dexpr(drtstring)
...     except logic.LogicalExpressionException as e: print(e)

>>> parse_error(r'')
End of input found.  Expression expected.
<BLANKLINE>
^
>>> parse_error(r'(')
End of input found.  Expression expected.
(
 ^
>>> parse_error(r'()')
Unexpected token: ')'.  Expression expected.
()
 ^
>>> parse_error(r'([')
End of input found.  Expected token ']'.
([
  ^
>>> parse_error(r'([,')
',' is an illegal variable name.  Constants may not be quantified.
([,
  ^
>>> parse_error(r'([x,')
End of input found.  Variable expected.
([x,
    ^
>>> parse_error(r'([]')
End of input found.  Expected token '['.
([]
   ^
>>> parse_error(r'([][')
End of input found.  Expected token ']'.
([][
    ^
>>> parse_error(r'([][,')
Unexpected token: ','.  Expression expected.
([][,
    ^
>>> parse_error(r'([][]')
End of input found.  Expected token ')'.
([][]
     ^
>>> parse_error(r'([x][man(x)]) |')
End of input found.  Expression expected.
([x][man(x)]) |
               ^

Pretty Printing
===============

>>> dexpr(r"([],[])").pretty_print()
 __
|  |
|--|
|__|

>>> dexpr(r"([],[([x],[big(x), dog(x)]) -> ([],[bark(x)]) -([x],[walk(x)])])").pretty_print()
 _____________________________
|                             |
|-----------------------------|
|   ________      _________   |
|  | x      |    |         |  |
| (|--------| -> |---------|) |
|  | big(x) |    | bark(x) |  |
|  | dog(x) |    |_________|  |
|  |________|                 |
|      _________              |
|     | x       |             |
|  __ |---------|             |
| |   | walk(x) |             |
|     |_________|             |
|_____________________________|

>>> dexpr(r"([x,y],[x=y]) + ([z],[dog(z), walk(z)])").pretty_print()
  _________     _________
 | x y     |   | z       |
(|---------| + |---------|)
 | (x = y) |   | dog(z)  |
 |_________|   | walk(z) |
               |_________|

>>> dexpr(r"([],[([x],[]) | ([y],[]) | ([z],[dog(z), walk(z)])])").pretty_print()
 _______________________________
|                               |
|-------------------------------|
|   ___     ___     _________   |
|  | x |   | y |   | z       |  |
| (|---| | |---| | |---------|) |
|  |___|   |___|   | dog(z)  |  |
|                  | walk(z) |  |
|                  |_________|  |
|_______________________________|

>>> dexpr(r"\P.\Q.(([x],[]) + P(x) + Q(x))(\x.([],[dog(x)]))").pretty_print()
         ___                        ________
 \      | x |                  \   |        |
/\ P Q.(|---| + P(x) + Q(x))( /\ x.|--------|)
        |___|                      | dog(x) |
                                   |________|
venv/lib/python3.10/site-packages/nltk/test/featgram.doctest
ADDED
@@ -0,0 +1,610 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

=========================
 Feature Grammar Parsing
=========================

.. definitions from nltk_book/definitions.rst

.. role:: feat
   :class: feature
.. role:: fval
   :class: fval
.. |rarr| unicode:: U+2192 .. right arrow
.. |dot| unicode:: U+2022 .. bullet
.. |pi| unicode:: U+03C0

Grammars can be parsed from strings.

>>> import nltk
>>> from nltk import grammar, parse
>>> g = """
... % start DP
... DP[AGR=?a] -> D[AGR=?a] N[AGR=?a]
... D[AGR=[NUM='sg', PERS=3]] -> 'this' | 'that'
... D[AGR=[NUM='pl', PERS=3]] -> 'these' | 'those'
... D[AGR=[NUM='pl', PERS=1]] -> 'we'
... D[AGR=[PERS=2]] -> 'you'
... N[AGR=[NUM='sg', GND='m']] -> 'boy'
... N[AGR=[NUM='pl', GND='m']] -> 'boys'
... N[AGR=[NUM='sg', GND='f']] -> 'girl'
... N[AGR=[NUM='pl', GND='f']] -> 'girls'
... N[AGR=[NUM='sg']] -> 'student'
... N[AGR=[NUM='pl']] -> 'students'
... """
>>> grammar = grammar.FeatureGrammar.fromstring(g)
>>> tokens = 'these girls'.split()
>>> parser = parse.FeatureEarleyChartParser(grammar)
>>> trees = parser.parse(tokens)
>>> for tree in trees: print(tree)
(DP[AGR=[GND='f', NUM='pl', PERS=3]]
  (D[AGR=[NUM='pl', PERS=3]] these)
  (N[AGR=[GND='f', NUM='pl']] girls))

In general, when we are trying to develop even a very small grammar,
it is convenient to put the rules in a file where they can be edited,
tested and revised. Let's assume that we have saved feat0cfg as a file named
``'feat0.fcfg'`` and placed it in the NLTK ``data`` directory. We can
inspect it as follows:

>>> nltk.data.show_cfg('grammars/book_grammars/feat0.fcfg')
% start S
# ###################
# Grammar Productions
# ###################
# S expansion productions
S -> NP[NUM=?n] VP[NUM=?n]
# NP expansion productions
NP[NUM=?n] -> N[NUM=?n]
NP[NUM=?n] -> PropN[NUM=?n]
NP[NUM=?n] -> Det[NUM=?n] N[NUM=?n]
NP[NUM=pl] -> N[NUM=pl]
# VP expansion productions
VP[TENSE=?t, NUM=?n] -> IV[TENSE=?t, NUM=?n]
VP[TENSE=?t, NUM=?n] -> TV[TENSE=?t, NUM=?n] NP
# ###################
# Lexical Productions
# ###################
Det[NUM=sg] -> 'this' | 'every'
Det[NUM=pl] -> 'these' | 'all'
Det -> 'the' | 'some' | 'several'
PropN[NUM=sg]-> 'Kim' | 'Jody'
N[NUM=sg] -> 'dog' | 'girl' | 'car' | 'child'
N[NUM=pl] -> 'dogs' | 'girls' | 'cars' | 'children'
IV[TENSE=pres, NUM=sg] -> 'disappears' | 'walks'
TV[TENSE=pres, NUM=sg] -> 'sees' | 'likes'
IV[TENSE=pres, NUM=pl] -> 'disappear' | 'walk'
TV[TENSE=pres, NUM=pl] -> 'see' | 'like'
IV[TENSE=past] -> 'disappeared' | 'walked'
TV[TENSE=past] -> 'saw' | 'liked'

Assuming we have saved feat0cfg as a file named
``'feat0.fcfg'``, the function ``parse.load_parser`` allows us to
read the grammar into NLTK, ready for use in parsing.


>>> cp = parse.load_parser('grammars/book_grammars/feat0.fcfg', trace=1)
>>> sent = 'Kim likes children'
>>> tokens = sent.split()
>>> tokens
['Kim', 'likes', 'children']
>>> trees = cp.parse(tokens)
|.Kim .like.chil.|
|[----]    .    .| [0:1] 'Kim'
|.    [----]    .| [1:2] 'likes'
|.    .    [----]| [2:3] 'children'
|[----]    .    .| [0:1] PropN[NUM='sg'] -> 'Kim' *
|[----]    .    .| [0:1] NP[NUM='sg'] -> PropN[NUM='sg'] *
|[---->    .    .| [0:1] S[] -> NP[NUM=?n] * VP[NUM=?n] {?n: 'sg'}
|.    [----]    .| [1:2] TV[NUM='sg', TENSE='pres'] -> 'likes' *
|.    [---->    .| [1:2] VP[NUM=?n, TENSE=?t] -> TV[NUM=?n, TENSE=?t] * NP[] {?n: 'sg', ?t: 'pres'}
|.    .    [----]| [2:3] N[NUM='pl'] -> 'children' *
|.    .    [----]| [2:3] NP[NUM='pl'] -> N[NUM='pl'] *
|.    .    [---->| [2:3] S[] -> NP[NUM=?n] * VP[NUM=?n] {?n: 'pl'}
|.    [---------]| [1:3] VP[NUM='sg', TENSE='pres'] -> TV[NUM='sg', TENSE='pres'] NP[] *
|[==============]| [0:3] S[] -> NP[NUM='sg'] VP[NUM='sg'] *
>>> for tree in trees: print(tree)
(S[]
  (NP[NUM='sg'] (PropN[NUM='sg'] Kim))
  (VP[NUM='sg', TENSE='pres']
    (TV[NUM='sg', TENSE='pres'] likes)
    (NP[NUM='pl'] (N[NUM='pl'] children))))

The parser works directly with
the underspecified productions given by the grammar. That is, the
Predictor rule does not attempt to compile out all admissible feature
combinations before trying to expand the non-terminals on the left hand
side of a production. However, when the Scanner matches an input word
against a lexical production that has been predicted, the new edge will
typically contain fully specified features; e.g., the edge
[PropN[`num`:feat: = `sg`:fval:] |rarr| 'Kim', (0, 1)]. Recall from
Chapter 8 that the Fundamental (or Completer) Rule in
standard CFGs is used to combine an incomplete edge that's expecting a
nonterminal *B* with a following, complete edge whose left hand side
matches *B*. In our current setting, rather than checking for a
complete match, we test whether the expected category *B* will
unify with the left hand side *B'* of a following complete
edge. We will explain in more detail in Section 9.2 how
unification works; for the moment, it is enough to know that as a
result of unification, any variable values of features in *B* will be
instantiated by constant values in the corresponding feature structure
in *B'*, and these instantiated values will be used in the new edge
added by the Completer. This instantiation can be seen, for example,
in the edge
[NP [`num`:feat:\ =\ `sg`:fval:] |rarr| PropN[`num`:feat:\ =\ `sg`:fval:] |dot|, (0, 1)]
in Example 9.2, where the feature `num`:feat: has been assigned the value `sg`:fval:.

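The instantiation step itself is easy to see in isolation. The following
two-line check is an added illustration (it is not part of the original
trace); it uses only ``nltk.FeatStruct`` and ``nltk.unify``, both of which
are introduced just below:

>>> print(nltk.unify(nltk.FeatStruct("[NUM=?n]"), nltk.FeatStruct("[NUM=sg]")))
[ NUM = 'sg' ]
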
Feature structures in NLTK are ... Atomic feature values can be strings or
integers.

>>> fs1 = nltk.FeatStruct(TENSE='past', NUM='sg')
>>> print(fs1)
[ NUM   = 'sg'   ]
[ TENSE = 'past' ]

We can think of a feature structure as being like a Python dictionary,
and access its values by indexing in the usual way.

>>> fs1 = nltk.FeatStruct(PER=3, NUM='pl', GND='fem')
>>> print(fs1['GND'])
fem

We can also define feature structures which have complex values, as
discussed earlier.

>>> fs2 = nltk.FeatStruct(POS='N', AGR=fs1)
>>> print(fs2)
[       [ GND = 'fem' ] ]
[ AGR = [ NUM = 'pl'  ] ]
[       [ PER = 3     ] ]
[                       ]
[ POS = 'N'             ]
>>> print(fs2['AGR'])
[ GND = 'fem' ]
[ NUM = 'pl'  ]
[ PER = 3     ]
>>> print(fs2['AGR']['PER'])
3

Feature structures can also be constructed using the ``parse()``
method of the ``nltk.FeatStruct`` class. Note that in this case, atomic
feature values do not need to be enclosed in quotes.

>>> f1 = nltk.FeatStruct("[NUMBER = sg]")
>>> f2 = nltk.FeatStruct("[PERSON = 3]")
>>> print(nltk.unify(f1, f2))
[ NUMBER = 'sg' ]
[ PERSON = 3    ]

>>> f1 = nltk.FeatStruct("[A = [B = b, D = d]]")
>>> f2 = nltk.FeatStruct("[A = [C = c, D = d]]")
>>> print(nltk.unify(f1, f2))
[     [ B = 'b' ] ]
[ A = [ C = 'c' ] ]
[     [ D = 'd' ] ]


Feature Structures as Graphs
----------------------------

Feature structures are not inherently tied to linguistic objects; they are
general purpose structures for representing knowledge. For example, we
could encode information about a person in a feature structure:

>>> person01 = nltk.FeatStruct("[NAME=Lee, TELNO='01 27 86 42 96',AGE=33]")
>>> print(person01)
[ AGE   = 33               ]
[ NAME  = 'Lee'            ]
[ TELNO = '01 27 86 42 96' ]

There are a number of notations for representing reentrancy in
matrix-style representations of feature structures. In NLTK, we adopt
the following convention: the first occurrence of a shared feature structure
is prefixed with an integer in parentheses, such as ``(1)``, and any
subsequent reference to that structure uses the notation
``->(1)``, as shown below.


>>> fs = nltk.FeatStruct("""[NAME=Lee, ADDRESS=(1)[NUMBER=74, STREET='rue Pascal'],
...                          SPOUSE=[NAME=Kim, ADDRESS->(1)]]""")
>>> print(fs)
[ ADDRESS = (1) [ NUMBER = 74           ] ]
[               [ STREET = 'rue Pascal' ] ]
[                                         ]
[ NAME    = 'Lee'                         ]
[                                         ]
[ SPOUSE  = [ ADDRESS -> (1)  ]           ]
[           [ NAME    = 'Kim' ]           ]

There can be any number of tags within a single feature structure.

>>> fs3 = nltk.FeatStruct("[A=(1)[B=b], C=(2)[], D->(1), E->(2)]")
>>> print(fs3)
[ A = (1) [ B = 'b' ] ]
[                     ]
[ C = (2) []          ]
[                     ]
[ D -> (1)            ]
[ E -> (2)            ]
>>> fs1 = nltk.FeatStruct(NUMBER=74, STREET='rue Pascal')
>>> fs2 = nltk.FeatStruct(CITY='Paris')
>>> print(nltk.unify(fs1, fs2))
[ CITY   = 'Paris'      ]
[ NUMBER = 74           ]
[ STREET = 'rue Pascal' ]

Unification is symmetric:

>>> nltk.unify(fs1, fs2) == nltk.unify(fs2, fs1)
True

Unification is associative:

>>> fs3 = nltk.FeatStruct(TELNO='01 27 86 42 96')
>>> nltk.unify(nltk.unify(fs1, fs2), fs3) == nltk.unify(fs1, nltk.unify(fs2, fs3))
True

Unification between *FS*:math:`_0` and *FS*:math:`_1` will fail if the
two feature structures share a path |pi|,
but the value of |pi| in *FS*:math:`_0` is a distinct
atom from the value of |pi| in *FS*:math:`_1`. In NLTK,
this is implemented by setting the result of unification to be
``None``.

>>> fs0 = nltk.FeatStruct(A='a')
>>> fs1 = nltk.FeatStruct(A='b')
>>> print(nltk.unify(fs0, fs1))
None

Now, if we look at how unification interacts with structure-sharing,
things become really interesting.



>>> fs0 = nltk.FeatStruct("""[NAME=Lee,
...                           ADDRESS=[NUMBER=74,
...                                    STREET='rue Pascal'],
...                           SPOUSE= [NAME=Kim,
...                                    ADDRESS=[NUMBER=74,
...                                             STREET='rue Pascal']]]""")
>>> print(fs0)
[ ADDRESS = [ NUMBER = 74           ]               ]
[           [ STREET = 'rue Pascal' ]               ]
[                                                   ]
[ NAME    = 'Lee'                                   ]
[                                                   ]
[           [ ADDRESS = [ NUMBER = 74           ] ] ]
[ SPOUSE  = [           [ STREET = 'rue Pascal' ] ] ]
[           [                                     ] ]
[           [ NAME    = 'Kim'                     ] ]


>>> fs1 = nltk.FeatStruct("[SPOUSE=[ADDRESS=[CITY=Paris]]]")
>>> print(nltk.unify(fs0, fs1))
[ ADDRESS = [ NUMBER = 74           ]               ]
[           [ STREET = 'rue Pascal' ]               ]
[                                                   ]
[ NAME    = 'Lee'                                   ]
[                                                   ]
[           [           [ CITY   = 'Paris'      ] ] ]
[           [ ADDRESS = [ NUMBER = 74           ] ] ]
[ SPOUSE  = [           [ STREET = 'rue Pascal' ] ] ]
[           [                                     ] ]
[           [ NAME    = 'Kim'                     ] ]

>>> fs2 = nltk.FeatStruct("""[NAME=Lee, ADDRESS=(1)[NUMBER=74, STREET='rue Pascal'],
...                          SPOUSE=[NAME=Kim, ADDRESS->(1)]]""")


>>> print(fs2)
[ ADDRESS = (1) [ NUMBER = 74           ] ]
[               [ STREET = 'rue Pascal' ] ]
[                                         ]
[ NAME    = 'Lee'                         ]
[                                         ]
[ SPOUSE  = [ ADDRESS -> (1)  ]           ]
[           [ NAME    = 'Kim' ]           ]


>>> print(nltk.unify(fs2, fs1))
[               [ CITY   = 'Paris'      ] ]
[ ADDRESS = (1) [ NUMBER = 74           ] ]
[               [ STREET = 'rue Pascal' ] ]
[                                         ]
[ NAME    = 'Lee'                         ]
[                                         ]
[ SPOUSE  = [ ADDRESS -> (1)  ]           ]
[           [ NAME    = 'Kim' ]           ]


>>> fs1 = nltk.FeatStruct("[ADDRESS1=[NUMBER=74, STREET='rue Pascal']]")
>>> fs2 = nltk.FeatStruct("[ADDRESS1=?x, ADDRESS2=?x]")
>>> print(fs2)
[ ADDRESS1 = ?x ]
[ ADDRESS2 = ?x ]
>>> print(nltk.unify(fs1, fs2))
[ ADDRESS1 = (1) [ NUMBER = 74           ] ]
[                [ STREET = 'rue Pascal' ] ]
[                                          ]
[ ADDRESS2 -> (1)                          ]



>>> sent = 'who do you claim that you like'
>>> tokens = sent.split()
>>> cp = parse.load_parser('grammars/book_grammars/feat1.fcfg', trace=1)
>>> trees = cp.parse(tokens)
|.w.d.y.c.t.y.l.|
|[-] . . . . . .| [0:1] 'who'
|. [-] . . . . .| [1:2] 'do'
|. . [-] . . . .| [2:3] 'you'
|. . . [-] . . .| [3:4] 'claim'
|. . . . [-] . .| [4:5] 'that'
|. . . . . [-] .| [5:6] 'you'
|. . . . . . [-]| [6:7] 'like'
|# . . . . . . .| [0:0] NP[]/NP[] -> *
|. # . . . . . .| [1:1] NP[]/NP[] -> *
|. . # . . . . .| [2:2] NP[]/NP[] -> *
|. . . # . . . .| [3:3] NP[]/NP[] -> *
|. . . . # . . .| [4:4] NP[]/NP[] -> *
|. . . . . # . .| [5:5] NP[]/NP[] -> *
|. . . . . . # .| [6:6] NP[]/NP[] -> *
|. . . . . . . #| [7:7] NP[]/NP[] -> *
|[-] . . . . . .| [0:1] NP[+WH] -> 'who' *
|[-> . . . . . .| [0:1] S[-INV] -> NP[] * VP[] {}
|[-> . . . . . .| [0:1] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
|[-> . . . . . .| [0:1] S[-INV] -> NP[] * S[]/NP[] {}
|. [-] . . . . .| [1:2] V[+AUX] -> 'do' *
|. [-> . . . . .| [1:2] S[+INV] -> V[+AUX] * NP[] VP[] {}
|. [-> . . . . .| [1:2] S[+INV]/?x[] -> V[+AUX] * NP[] VP[]/?x[] {}
|. [-> . . . . .| [1:2] VP[] -> V[+AUX] * VP[] {}
|. [-> . . . . .| [1:2] VP[]/?x[] -> V[+AUX] * VP[]/?x[] {}
|. . [-] . . . .| [2:3] NP[-WH] -> 'you' *
|. . [-> . . . .| [2:3] S[-INV] -> NP[] * VP[] {}
|. . [-> . . . .| [2:3] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
|. . [-> . . . .| [2:3] S[-INV] -> NP[] * S[]/NP[] {}
|. [---> . . . .| [1:3] S[+INV] -> V[+AUX] NP[] * VP[] {}
|. [---> . . . .| [1:3] S[+INV]/?x[] -> V[+AUX] NP[] * VP[]/?x[] {}
|. . . [-] . . .| [3:4] V[-AUX, SUBCAT='clause'] -> 'claim' *
|. . . [-> . . .| [3:4] VP[] -> V[-AUX, SUBCAT='clause'] * SBar[] {}
|. . . [-> . . .| [3:4] VP[]/?x[] -> V[-AUX, SUBCAT='clause'] * SBar[]/?x[] {}
|. . . . [-] . .| [4:5] Comp[] -> 'that' *
|. . . . [-> . .| [4:5] SBar[] -> Comp[] * S[-INV] {}
|. . . . [-> . .| [4:5] SBar[]/?x[] -> Comp[] * S[-INV]/?x[] {}
|. . . . . [-] .| [5:6] NP[-WH] -> 'you' *
|. . . . . [-> .| [5:6] S[-INV] -> NP[] * VP[] {}
|. . . . . [-> .| [5:6] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
|. . . . . [-> .| [5:6] S[-INV] -> NP[] * S[]/NP[] {}
|. . . . . . [-]| [6:7] V[-AUX, SUBCAT='trans'] -> 'like' *
|. . . . . . [->| [6:7] VP[] -> V[-AUX, SUBCAT='trans'] * NP[] {}
|. . . . . . [->| [6:7] VP[]/?x[] -> V[-AUX, SUBCAT='trans'] * NP[]/?x[] {}
|. . . . . . [-]| [6:7] VP[]/NP[] -> V[-AUX, SUBCAT='trans'] NP[]/NP[] *
|. . . . . [---]| [5:7] S[-INV]/NP[] -> NP[] VP[]/NP[] *
|. . . . [-----]| [4:7] SBar[]/NP[] -> Comp[] S[-INV]/NP[] *
|. . . [-------]| [3:7] VP[]/NP[] -> V[-AUX, SUBCAT='clause'] SBar[]/NP[] *
|. . [---------]| [2:7] S[-INV]/NP[] -> NP[] VP[]/NP[] *
|. [-----------]| [1:7] S[+INV]/NP[] -> V[+AUX] NP[] VP[]/NP[] *
|[=============]| [0:7] S[-INV] -> NP[] S[]/NP[] *

>>> trees = list(trees)
>>> for tree in trees: print(tree)
(S[-INV]
  (NP[+WH] who)
  (S[+INV]/NP[]
    (V[+AUX] do)
    (NP[-WH] you)
    (VP[]/NP[]
      (V[-AUX, SUBCAT='clause'] claim)
      (SBar[]/NP[]
        (Comp[] that)
        (S[-INV]/NP[]
          (NP[-WH] you)
          (VP[]/NP[] (V[-AUX, SUBCAT='trans'] like) (NP[]/NP[] )))))))

A different parser should give the same parse trees, but perhaps in a different order:

>>> cp2 = parse.load_parser('grammars/book_grammars/feat1.fcfg', trace=1,
...                         parser=parse.FeatureEarleyChartParser)
>>> trees2 = cp2.parse(tokens)
|.w.d.y.c.t.y.l.|
|[-] . . . . . .| [0:1] 'who'
|. [-] . . . . .| [1:2] 'do'
|. . [-] . . . .| [2:3] 'you'
|. . . [-] . . .| [3:4] 'claim'
|. . . . [-] . .| [4:5] 'that'
|. . . . . [-] .| [5:6] 'you'
|. . . . . . [-]| [6:7] 'like'
|> . . . . . . .| [0:0] S[-INV] -> * NP[] VP[] {}
|> . . . . . . .| [0:0] S[-INV]/?x[] -> * NP[] VP[]/?x[] {}
|> . . . . . . .| [0:0] S[-INV] -> * NP[] S[]/NP[] {}
|> . . . . . . .| [0:0] S[-INV] -> * Adv[+NEG] S[+INV] {}
|> . . . . . . .| [0:0] S[+INV] -> * V[+AUX] NP[] VP[] {}
|> . . . . . . .| [0:0] S[+INV]/?x[] -> * V[+AUX] NP[] VP[]/?x[] {}
|> . . . . . . .| [0:0] NP[+WH] -> * 'who' {}
|[-] . . . . . .| [0:1] NP[+WH] -> 'who' *
|[-> . . . . . .| [0:1] S[-INV] -> NP[] * VP[] {}
|[-> . . . . . .| [0:1] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
|[-> . . . . . .| [0:1] S[-INV] -> NP[] * S[]/NP[] {}
|. > . . . . . .| [1:1] S[-INV]/?x[] -> * NP[] VP[]/?x[] {}
|. > . . . . . .| [1:1] S[+INV]/?x[] -> * V[+AUX] NP[] VP[]/?x[] {}
|. > . . . . . .| [1:1] V[+AUX] -> * 'do' {}
|. > . . . . . .| [1:1] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
|. > . . . . . .| [1:1] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
|. > . . . . . .| [1:1] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
|. > . . . . . .| [1:1] VP[] -> * V[-AUX, SUBCAT='intrans'] {}
|. > . . . . . .| [1:1] VP[] -> * V[-AUX, SUBCAT='trans'] NP[] {}
|. > . . . . . .| [1:1] VP[] -> * V[-AUX, SUBCAT='clause'] SBar[] {}
|. > . . . . . .| [1:1] VP[] -> * V[+AUX] VP[] {}
|. [-] . . . . .| [1:2] V[+AUX] -> 'do' *
|. [-> . . . . .| [1:2] S[+INV]/?x[] -> V[+AUX] * NP[] VP[]/?x[] {}
|. [-> . . . . .| [1:2] VP[]/?x[] -> V[+AUX] * VP[]/?x[] {}
|. [-> . . . . .| [1:2] VP[] -> V[+AUX] * VP[] {}
|. . > . . . . .| [2:2] VP[] -> * V[-AUX, SUBCAT='intrans'] {}
|. . > . . . . .| [2:2] VP[] -> * V[-AUX, SUBCAT='trans'] NP[] {}
|. . > . . . . .| [2:2] VP[] -> * V[-AUX, SUBCAT='clause'] SBar[] {}
|. . > . . . . .| [2:2] VP[] -> * V[+AUX] VP[] {}
|. . > . . . . .| [2:2] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
|. . > . . . . .| [2:2] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
|. . > . . . . .| [2:2] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
|. . > . . . . .| [2:2] NP[-WH] -> * 'you' {}
|. . [-] . . . .| [2:3] NP[-WH] -> 'you' *
|. [---> . . . .| [1:3] S[+INV]/?x[] -> V[+AUX] NP[] * VP[]/?x[] {}
|. . . > . . . .| [3:3] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
|. . . > . . . .| [3:3] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
|. . . > . . . .| [3:3] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
|. . . > . . . .| [3:3] V[-AUX, SUBCAT='clause'] -> * 'claim' {}
|. . . [-] . . .| [3:4] V[-AUX, SUBCAT='clause'] -> 'claim' *
|. . . [-> . . .| [3:4] VP[]/?x[] -> V[-AUX, SUBCAT='clause'] * SBar[]/?x[] {}
|. . . . > . . .| [4:4] SBar[]/?x[] -> * Comp[] S[-INV]/?x[] {}
|. . . . > . . .| [4:4] Comp[] -> * 'that' {}
|. . . . [-] . .| [4:5] Comp[] -> 'that' *
|. . . . [-> . .| [4:5] SBar[]/?x[] -> Comp[] * S[-INV]/?x[] {}
|. . . . . > . .| [5:5] S[-INV]/?x[] -> * NP[] VP[]/?x[] {}
|. . . . . > . .| [5:5] NP[-WH] -> * 'you' {}
|. . . . . [-] .| [5:6] NP[-WH] -> 'you' *
|. . . . . [-> .| [5:6] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
|. . . . . . > .| [6:6] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
|. . . . . . > .| [6:6] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
|. . . . . . > .| [6:6] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
|. . . . . . > .| [6:6] V[-AUX, SUBCAT='trans'] -> * 'like' {}
|. . . . . . [-]| [6:7] V[-AUX, SUBCAT='trans'] -> 'like' *
|. . . . . . [->| [6:7] VP[]/?x[] -> V[-AUX, SUBCAT='trans'] * NP[]/?x[] {}
|. . . . . . . #| [7:7] NP[]/NP[] -> *
|. . . . . . [-]| [6:7] VP[]/NP[] -> V[-AUX, SUBCAT='trans'] NP[]/NP[] *
|. . . . . [---]| [5:7] S[-INV]/NP[] -> NP[] VP[]/NP[] *
|. . . . [-----]| [4:7] SBar[]/NP[] -> Comp[] S[-INV]/NP[] *
|. . . [-------]| [3:7] VP[]/NP[] -> V[-AUX, SUBCAT='clause'] SBar[]/NP[] *
|. [-----------]| [1:7] S[+INV]/NP[] -> V[+AUX] NP[] VP[]/NP[] *
|[=============]| [0:7] S[-INV] -> NP[] S[]/NP[] *

>>> sorted(trees) == sorted(trees2)
True

Let's load a German grammar:

>>> cp = parse.load_parser('grammars/book_grammars/german.fcfg', trace=0)
>>> sent = 'die Katze sieht den Hund'
>>> tokens = sent.split()
>>> trees = cp.parse(tokens)
>>> for tree in trees: print(tree)
(S[]
  (NP[AGR=[GND='fem', NUM='sg', PER=3], CASE='nom']
    (Det[AGR=[GND='fem', NUM='sg', PER=3], CASE='nom'] die)
    (N[AGR=[GND='fem', NUM='sg', PER=3]] Katze))
  (VP[AGR=[NUM='sg', PER=3]]
    (TV[AGR=[NUM='sg', PER=3], OBJCASE='acc'] sieht)
    (NP[AGR=[GND='masc', NUM='sg', PER=3], CASE='acc']
      (Det[AGR=[GND='masc', NUM='sg', PER=3], CASE='acc'] den)
      (N[AGR=[GND='masc', NUM='sg', PER=3]] Hund))))

Grammar with Binding Operators
------------------------------
The bindop.fcfg grammar is a semantic grammar that uses lambda
calculus. Each element has a core semantics, which is a single lambda
calculus expression; and a set of binding operators, which bind
variables.

In order to make the binding operators work right, they need to
instantiate their bound variable every time they are added to the
chart. To do this, we use a special subclass of `Chart`, called
`InstantiateVarsChart`.

>>> from nltk.parse.featurechart import InstantiateVarsChart
>>> cp = parse.load_parser('grammars/sample_grammars/bindop.fcfg', trace=1,
...                        chart_class=InstantiateVarsChart)
>>> print(cp.grammar())
Grammar with 15 productions (start state = S[])
    S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] VP[SEM=[BO=?b2, CORE=?vp]]
    VP[SEM=[BO={?b1+?b2}, CORE=<?v(?obj)>]] -> TV[SEM=[BO=?b1, CORE=?v]] NP[SEM=[BO=?b2, CORE=?obj]]
    VP[SEM=?s] -> IV[SEM=?s]
    NP[SEM=[BO={?b1+?b2+{bo(?det(?n),@x)}}, CORE=<@x>]] -> Det[SEM=[BO=?b1, CORE=?det]] N[SEM=[BO=?b2, CORE=?n]]
    Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] -> 'a'
    N[SEM=[BO={/}, CORE=<dog>]] -> 'dog'
    N[SEM=[BO={/}, CORE=<dog>]] -> 'cat'
    N[SEM=[BO={/}, CORE=<dog>]] -> 'mouse'
    IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'barks'
    IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'eats'
    IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'walks'
    TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] -> 'feeds'
    TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] -> 'walks'
    NP[SEM=[BO={bo(\P.P(John),@x)}, CORE=<@x>]] -> 'john'
    NP[SEM=[BO={bo(\P.P(John),@x)}, CORE=<@x>]] -> 'alex'

A simple intransitive sentence:

>>> from nltk.sem import logic
>>> logic._counter._value = 100

>>> trees = cp.parse('john barks'.split())
|. john.barks.|
|[-----]     .| [0:1] 'john'
|.     [-----]| [1:2] 'barks'
|[-----]     .| [0:1] NP[SEM=[BO={bo(\P.P(John),z101)}, CORE=<z101>]] -> 'john' *
|[----->     .| [0:1] S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] * VP[SEM=[BO=?b2, CORE=?vp]] {?b1: {bo(\P.P(John),z2)}, ?subj: <IndividualVariableExpression z2>}
|.     [-----]| [1:2] IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'barks' *
|.     [-----]| [1:2] VP[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] *
|[===========]| [0:2] S[SEM=[BO={bo(\P.P(John),z2)}, CORE=<bark(z2)>]] -> NP[SEM=[BO={bo(\P.P(John),z2)}, CORE=<z2>]] VP[SEM=[BO={/}, CORE=<\x.bark(x)>]] *
>>> for tree in trees: print(tree)
(S[SEM=[BO={bo(\P.P(John),z2)}, CORE=<bark(z2)>]]
  (NP[SEM=[BO={bo(\P.P(John),z101)}, CORE=<z101>]] john)
  (VP[SEM=[BO={/}, CORE=<\x.bark(x)>]]
    (IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] barks)))

A transitive sentence:

>>> trees = cp.parse('john feeds a dog'.split())
|.joh.fee. a .dog.|
|[---]   .   .   .| [0:1] 'john'
|.   [---]   .   .| [1:2] 'feeds'
|.   .   [---]   .| [2:3] 'a'
|.   .   .   [---]| [3:4] 'dog'
|[---]   .   .   .| [0:1] NP[SEM=[BO={bo(\P.P(John),z102)}, CORE=<z102>]] -> 'john' *
|[--->   .   .   .| [0:1] S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] * VP[SEM=[BO=?b2, CORE=?vp]] {?b1: {bo(\P.P(John),z2)}, ?subj: <IndividualVariableExpression z2>}
|.   [---]   .   .| [1:2] TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] -> 'feeds' *
|.   [--->   .   .| [1:2] VP[SEM=[BO={?b1+?b2}, CORE=<?v(?obj)>]] -> TV[SEM=[BO=?b1, CORE=?v]] * NP[SEM=[BO=?b2, CORE=?obj]] {?b1: {/}, ?v: <LambdaExpression \x y.feed(y,x)>}
|.   .   [---]   .| [2:3] Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] -> 'a' *
|.   .   [--->   .| [2:3] NP[SEM=[BO={?b1+?b2+{bo(?det(?n),@x)}}, CORE=<@x>]] -> Det[SEM=[BO=?b1, CORE=?det]] * N[SEM=[BO=?b2, CORE=?n]] {?b1: {/}, ?det: <LambdaExpression \Q P.exists x.(Q(x) & P(x))>}
|.   .   .   [---]| [3:4] N[SEM=[BO={/}, CORE=<dog>]] -> 'dog' *
|.   .   [-------]| [2:4] NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z103)}, CORE=<z103>]] -> Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] N[SEM=[BO={/}, CORE=<dog>]] *
|.   .   [------->| [2:4] S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] * VP[SEM=[BO=?b2, CORE=?vp]] {?b1: {bo(\P.exists x.(dog(x) & P(x)),z2)}, ?subj: <IndividualVariableExpression z2>}
|.   [-----------]| [1:4] VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<\y.feed(y,z2)>]] -> TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<z2>]] *
|[===============]| [0:4] S[SEM=[BO={bo(\P.P(John),z2), bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<feed(z2,z3)>]] -> NP[SEM=[BO={bo(\P.P(John),z2)}, CORE=<z2>]] VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<\y.feed(y,z3)>]] *

>>> for tree in trees: print(tree)
(S[SEM=[BO={bo(\P.P(John),z2), bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<feed(z2,z3)>]]
  (NP[SEM=[BO={bo(\P.P(John),z102)}, CORE=<z102>]] john)
  (VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<\y.feed(y,z2)>]]
    (TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] feeds)
    (NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z103)}, CORE=<z103>]]
      (Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] a)
      (N[SEM=[BO={/}, CORE=<dog>]] dog))))

Turn down the verbosity:

>>> cp = parse.load_parser('grammars/sample_grammars/bindop.fcfg', trace=0,
...                        chart_class=InstantiateVarsChart)

Reuse the same lexical item twice:

>>> trees = cp.parse('john feeds john'.split())
>>> for tree in trees: print(tree)
(S[SEM=[BO={bo(\P.P(John),z2), bo(\P.P(John),z3)}, CORE=<feed(z2,z3)>]]
  (NP[SEM=[BO={bo(\P.P(John),z104)}, CORE=<z104>]] john)
  (VP[SEM=[BO={bo(\P.P(John),z2)}, CORE=<\y.feed(y,z2)>]]
    (TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] feeds)
    (NP[SEM=[BO={bo(\P.P(John),z105)}, CORE=<z105>]] john)))

>>> trees = cp.parse('a dog feeds a dog'.split())
>>> for tree in trees: print(tree)
(S[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2), bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<feed(z2,z3)>]]
  (NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z106)}, CORE=<z106>]]
    (Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] a)
    (N[SEM=[BO={/}, CORE=<dog>]] dog))
  (VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<\y.feed(y,z2)>]]
    (TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] feeds)
    (NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z107)}, CORE=<z107>]]
      (Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] a)
      (N[SEM=[BO={/}, CORE=<dog>]] dog))))
venv/lib/python3.10/site-packages/nltk/test/featstruct.doctest
ADDED
@@ -0,0 +1,1229 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

==================================
 Feature Structures & Unification
==================================
>>> from nltk.featstruct import FeatStruct
>>> from nltk.sem.logic import Variable, VariableExpression, Expression

.. note:: For now, featstruct uses the older lambdalogic semantics
   module. Eventually, it should be updated to use the new first
   order predicate logic module.

Overview
~~~~~~~~
A feature structure is a mapping from feature identifiers to feature
values, where feature values can be simple values (like strings or
ints), nested feature structures, or variables:

>>> fs1 = FeatStruct(number='singular', person=3)
>>> print(fs1)
[ number = 'singular' ]
[ person = 3          ]

Feature structures may be nested:

>>> fs2 = FeatStruct(type='NP', agr=fs1)
>>> print(fs2)
[ agr  = [ number = 'singular' ] ]
[        [ person = 3          ] ]
[                                ]
[ type = 'NP'                    ]

Variables are used to indicate that two features should be assigned
the same value. For example, the following feature structure requires
that the feature fs3['agr']['number'] be bound to the same value as the
feature fs3['subj']['number'].

>>> fs3 = FeatStruct(agr=FeatStruct(number=Variable('?n')),
...                  subj=FeatStruct(number=Variable('?n')))
>>> print(fs3)
[ agr  = [ number = ?n ] ]
[                        ]
[ subj = [ number = ?n ] ]

Feature structures are typically used to represent partial information
about objects. A feature name that is not mapped to a value stands
for a feature whose value is unknown (*not* a feature without a
value). Two feature structures that represent (potentially
overlapping) information about the same object can be combined by
*unification*.

>>> print(fs2.unify(fs3))
[ agr  = [ number = 'singular' ] ]
[        [ person = 3          ] ]
[                                ]
[ subj = [ number = 'singular' ] ]
[                                ]
[ type = 'NP'                    ]

When two inconsistent feature structures are unified, the unification
fails and returns ``None``.

>>> fs4 = FeatStruct(agr=FeatStruct(person=1))
>>> print(fs4.unify(fs2))
None
>>> print(fs2.unify(fs4))
None

..
    >>> del fs1, fs2, fs3, fs4 # clean-up

Feature Structure Types
-----------------------
There are actually two types of feature structure:

- *feature dictionaries*, implemented by `FeatDict`, act like
  Python dictionaries. Feature identifiers may be strings or
  instances of the `Feature` class.
- *feature lists*, implemented by `FeatList`, act like Python
  lists. Feature identifiers are integers.

When you construct a feature structure using the `FeatStruct`
constructor, it will automatically decide which type is appropriate:

>>> type(FeatStruct(number='singular'))
<class 'nltk.featstruct.FeatDict'>
>>> type(FeatStruct([1,2,3]))
<class 'nltk.featstruct.FeatList'>

Usually, we will just use feature dictionaries; but sometimes feature
lists can be useful too. Two feature lists will unify with each other
only if they have equal lengths, and all of their feature values
match. If you wish to write a feature list that contains 'unknown'
values, you must use variables:

>>> fs1 = FeatStruct([1,2,Variable('?y')])
>>> fs2 = FeatStruct([1,Variable('?x'),3])
>>> fs1.unify(fs2)
[1, 2, 3]

..
    >>> del fs1, fs2 # clean-up

Parsing Feature Structure Strings
---------------------------------
Feature structures can be constructed directly from strings. Often,
this is more convenient than constructing them directly. NLTK can
parse most feature strings to produce the corresponding feature
structures. (But you must restrict your base feature values to
strings, ints, logic expressions (`nltk.sem.logic.Expression`), and a
few other types discussed below).

Feature dictionaries are written like Python dictionaries, except that
keys are not put in quotes; and square brackets (``[]``) are used
instead of braces (``{}``):

>>> FeatStruct('[tense="past", agr=[number="sing", person=3]]')
[agr=[number='sing', person=3], tense='past']

If a feature value is a single alphanumeric word, then it does not
need to be quoted -- it will be automatically treated as a string:

>>> FeatStruct('[tense=past, agr=[number=sing, person=3]]')
[agr=[number='sing', person=3], tense='past']

Feature lists are written like python lists:

>>> FeatStruct('[1, 2, 3]')
[1, 2, 3]

The expression ``[]`` is treated as an empty feature dictionary, not
an empty feature list:

>>> type(FeatStruct('[]'))
<class 'nltk.featstruct.FeatDict'>

Feature Paths
-------------
Features can be specified using *feature paths*, or tuples of feature
identifiers that specify path through the nested feature structures to
a value.

>>> fs1 = FeatStruct('[x=1, y=[1,2,[z=3]]]')
>>> fs1['y']
[1, 2, [z=3]]
>>> fs1['y', 2]
[z=3]
>>> fs1['y', 2, 'z']
3

..
    >>> del fs1 # clean-up

Reentrance
----------
Feature structures may contain reentrant feature values. A *reentrant
feature value* is a single feature structure that can be accessed via
multiple feature paths.

>>> fs1 = FeatStruct(x='val')
>>> fs2 = FeatStruct(a=fs1, b=fs1)
>>> print(fs2)
[ a = (1) [ x = 'val' ] ]
[                       ]
[ b -> (1)              ]
>>> fs2
[a=(1)[x='val'], b->(1)]

As you can see, reentrance is displayed by marking a feature structure
with a unique identifier, in this case ``(1)``, the first time it is
encountered; and then using the special form ``var -> id`` whenever it
is encountered again. You can use the same notation to directly
create reentrant feature structures from strings.

>>> FeatStruct('[a=(1)[], b->(1), c=[d->(1)]]')
[a=(1)[], b->(1), c=[d->(1)]]

+
|
179 |
+
Reentrant feature structures may contain cycles:
|
180 |
+
|
181 |
+
>>> fs3 = FeatStruct('(1)[a->(1)]')
|
182 |
+
>>> fs3['a', 'a', 'a', 'a']
|
183 |
+
(1)[a->(1)]
|
184 |
+
>>> fs3['a', 'a', 'a', 'a'] is fs3
|
185 |
+
True
|
186 |
+
|
187 |
+
Unification preserves the reentrance relations imposed by both of the
|
188 |
+
unified feature structures. In the feature structure resulting from
|
189 |
+
unification, any modifications to a reentrant feature value will be
|
190 |
+
visible using any of its feature paths.
|
191 |
+
|
192 |
+
>>> fs3.unify(FeatStruct('[a=[b=12], c=33]'))
|
193 |
+
(1)[a->(1), b=12, c=33]
|
194 |
+
|
195 |
+
..
|
196 |
+
>>> del fs1, fs2, fs3 # clean-up
|
197 |
+
|
198 |
+
Feature Structure Equality
|
199 |
+
--------------------------
|
200 |
+
Two feature structures are considered equal if they assign the same
|
201 |
+
values to all features, *and* they contain the same reentrances.
|
202 |
+
|
203 |
+
>>> fs1 = FeatStruct('[a=(1)[x=1], b->(1)]')
|
204 |
+
>>> fs2 = FeatStruct('[a=(1)[x=1], b->(1)]')
|
205 |
+
>>> fs3 = FeatStruct('[a=[x=1], b=[x=1]]')
|
206 |
+
>>> fs1 == fs1, fs1 is fs1
|
207 |
+
(True, True)
|
208 |
+
>>> fs1 == fs2, fs1 is fs2
|
209 |
+
(True, False)
|
210 |
+
>>> fs1 == fs3, fs1 is fs3
|
211 |
+
(False, False)
|
212 |
+
|
213 |
+
Note that this differs from how Python dictionaries and lists define
|
214 |
+
equality -- in particular, Python dictionaries and lists ignore
|
215 |
+
reentrance relations. To test two feature structures for equality
|
216 |
+
while ignoring reentrance relations, use the `equal_values()` method:
|
217 |
+
|
218 |
+
>>> fs1.equal_values(fs1)
|
219 |
+
True
|
220 |
+
>>> fs1.equal_values(fs2)
|
221 |
+
True
|
222 |
+
>>> fs1.equal_values(fs3)
|
223 |
+
True
|
224 |
+
|
225 |
+
..
|
226 |
+
>>> del fs1, fs2, fs3 # clean-up
|
227 |
+
|
228 |
+
Feature Value Sets & Feature Value Tuples
|
229 |
+
-----------------------------------------
|
230 |
+
`nltk.featstruct` defines two new data types that are intended to be
|
231 |
+
used as feature values: `FeatureValueTuple` and `FeatureValueSet`.
|
232 |
+
Both of these types are considered base values -- i.e., unification
|
233 |
+
does *not* apply to them. However, variable binding *does* apply to
|
234 |
+
any values that they contain.
|
235 |
+
|
236 |
+
Feature value tuples are written with parentheses:
|
237 |
+
|
238 |
+
>>> fs1 = FeatStruct('[x=(?x, ?y)]')
|
239 |
+
>>> fs1
|
240 |
+
[x=(?x, ?y)]
|
241 |
+
>>> fs1.substitute_bindings({Variable('?x'): 1, Variable('?y'): 2})
|
242 |
+
[x=(1, 2)]
|
243 |
+
|
244 |
+
Feature sets are written with braces:
|
245 |
+
|
246 |
+
>>> fs1 = FeatStruct('[x={?x, ?y}]')
|
247 |
+
>>> fs1
|
248 |
+
[x={?x, ?y}]
|
249 |
+
>>> fs1.substitute_bindings({Variable('?x'): 1, Variable('?y'): 2})
|
250 |
+
[x={1, 2}]
|
251 |
+
|
252 |
+
In addition to the basic feature value tuple & set classes, nltk
|
253 |
+
defines feature value unions (for sets) and feature value
|
254 |
+
concatenations (for tuples). These are written using '+', and can be
|
255 |
+
used to combine sets & tuples:
|
256 |
+
|
257 |
+
>>> fs1 = FeatStruct('[x=((1, 2)+?z), z=?z]')
|
258 |
+
>>> fs1
|
259 |
+
[x=((1, 2)+?z), z=?z]
|
260 |
+
>>> fs1.unify(FeatStruct('[z=(3, 4, 5)]'))
|
261 |
+
[x=(1, 2, 3, 4, 5), z=(3, 4, 5)]
|
262 |
+
|
263 |
+
Thus, feature value tuples and sets can be used to build up tuples
|
264 |
+
and sets of values over the course of unification. For example, when
|
265 |
+
parsing sentences using a semantic feature grammar, feature sets or
|
266 |
+
feature tuples can be used to build a list of semantic predicates as
|
267 |
+
the sentence is parsed.
|
268 |
+
|
269 |
+
As was mentioned above, unification does not apply to feature value
|
270 |
+
tuples and sets. One reason for this is that it's impossible to define a
|
271 |
+
single correct answer for unification when concatenation is used.
|
272 |
+
Consider the following example:
|
273 |
+
|
274 |
+
>>> fs1 = FeatStruct('[x=(1, 2, 3, 4)]')
|
275 |
+
>>> fs2 = FeatStruct('[x=(?a+?b), a=?a, b=?b]')
|
276 |
+
|
277 |
+
If unification applied to feature tuples, then the unification
|
278 |
+
algorithm would have to arbitrarily choose how to divide the tuple
|
279 |
+
(1,2,3,4) into two parts. Instead, the unification algorithm refuses
|
280 |
+
to make this decision, and simply unifies based on value. Because
|
281 |
+
(1,2,3,4) is not equal to (?a+?b), fs1 and fs2 will not unify:
|
282 |
+
|
283 |
+
>>> print(fs1.unify(fs2))
|
284 |
+
None
|
285 |
+
|
286 |
+
If you need a list-like structure that unification does apply to, use
|
287 |
+
`FeatList`.
|
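For instance, a `FeatList` unifies elementwise, binding any variables it
contains. A minimal sketch, shown as a literal block (it assumes that
`FeatList` can be constructed from a plain Python sequence):

| >>> from nltk.featstruct import FeatList
| >>> fl = FeatList([1, Variable('?x'), 3])
| >>> fl.unify(FeatList([1, 2, 3]))
| [1, 2, 3]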
288 |
+
|
289 |
+
..
|
290 |
+
>>> del fs1, fs2 # clean-up
|
291 |
+
|
292 |
+
Light-weight Feature Structures
|
293 |
+
-------------------------------
|
294 |
+
Many of the functions defined by `nltk.featstruct` can be applied
|
295 |
+
directly to simple Python dictionaries and lists, rather than to
|
296 |
+
full-fledged `FeatDict` and `FeatList` objects. In other words,
|
297 |
+
Python ``dicts`` and ``lists`` can be used as "light-weight" feature
|
298 |
+
structures.
|
299 |
+
|
300 |
+
>>> # Note: pprint prints dicts sorted
|
301 |
+
>>> from pprint import pprint
|
302 |
+
>>> from nltk.featstruct import unify
|
303 |
+
>>> pprint(unify(dict(x=1, y=dict()), dict(a='a', y=dict(b='b'))))
|
304 |
+
{'a': 'a', 'x': 1, 'y': {'b': 'b'}}
|
305 |
+
|
306 |
+
However, you should keep in mind the following caveats:
|
307 |
+
|
308 |
+
- Python dictionaries & lists ignore reentrance when checking for
|
309 |
+
equality between values. But two FeatStructs with different
|
310 |
+
reentrances are considered nonequal, even if all their base
|
311 |
+
values are equal.
|
312 |
+
|
313 |
+
- FeatStructs can be easily frozen, allowing them to be used as
|
314 |
+
keys in hash tables. Python dictionaries and lists can not.
|
315 |
+
|
316 |
+
- FeatStructs display reentrance in their string representations;
|
317 |
+
Python dictionaries and lists do not.
|
318 |
+
|
319 |
+
- FeatStructs may *not* be mixed with Python dictionaries and lists
|
320 |
+
(e.g., when performing unification).
|
321 |
+
|
322 |
+
- FeatStructs provide a number of useful methods, such as `walk()`
|
323 |
+
and `cyclic()`, which are not available for Python dicts & lists.
|
324 |
+
|
325 |
+
In general, if your feature structures will contain any reentrances,
|
326 |
+
or if you plan to use them as dictionary keys, it is strongly
|
327 |
+
recommended that you use full-fledged `FeatStruct` objects.
|
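As a sketch of the hashability point (freezing itself is demonstrated in
detail in the regression tests below):

| >>> fs = FeatStruct(x=1)
| >>> fs.freeze()
| >>> index = {fs: 'value'}    # works: frozen FeatStructs are hashable
| >>> {dict(x=1): 'value'}     # plain dicts can never be keys
| Traceback (most recent call last):
|   . . .
| TypeError: unhashable type: 'dict'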
328 |
+
|
329 |
+
Custom Feature Values
|
330 |
+
---------------------
|
331 |
+
The abstract base class `CustomFeatureValue` can be used to define new
|
332 |
+
base value types that have custom unification methods. For example,
|
333 |
+
the following feature value type encodes a range, and defines
|
334 |
+
unification as taking the intersection of the two ranges:
|
335 |
+
|
336 |
+
>>> from functools import total_ordering
|
337 |
+
>>> from nltk.featstruct import CustomFeatureValue, UnificationFailure
|
338 |
+
>>> @total_ordering
|
339 |
+
... class Range(CustomFeatureValue):
|
340 |
+
... def __init__(self, low, high):
|
341 |
+
... assert low <= high
|
342 |
+
... self.low = low
|
343 |
+
... self.high = high
|
344 |
+
... def unify(self, other):
|
345 |
+
... if not isinstance(other, Range):
|
346 |
+
... return UnificationFailure
|
347 |
+
... low = max(self.low, other.low)
|
348 |
+
... high = min(self.high, other.high)
|
349 |
+
... if low <= high: return Range(low, high)
|
350 |
+
... else: return UnificationFailure
|
351 |
+
... def __repr__(self):
|
352 |
+
... return '(%s<x<%s)' % (self.low, self.high)
|
353 |
+
... def __eq__(self, other):
|
354 |
+
... if not isinstance(other, Range):
|
355 |
+
... return False
|
356 |
+
... return (self.low == other.low) and (self.high == other.high)
|
357 |
+
... def __lt__(self, other):
|
358 |
+
... if not isinstance(other, Range):
|
359 |
+
... return True
|
360 |
+
... return (self.low, self.high) < (other.low, other.high)
|
361 |
+
|
362 |
+
>>> fs1 = FeatStruct(x=Range(5,8), y=FeatStruct(z=Range(7,22)))
|
363 |
+
>>> print(fs1.unify(FeatStruct(x=Range(6, 22))))
|
364 |
+
[ x = (6<x<8) ]
|
365 |
+
[ ]
|
366 |
+
[ y = [ z = (7<x<22) ] ]
|
367 |
+
>>> print(fs1.unify(FeatStruct(x=Range(9, 12))))
|
368 |
+
None
|
369 |
+
>>> print(fs1.unify(FeatStruct(x=12)))
|
370 |
+
None
|
371 |
+
>>> print(fs1.unify(FeatStruct('[x=?x, y=[z=?x]]')))
|
372 |
+
[ x = (7<x<8) ]
|
373 |
+
[ ]
|
374 |
+
[ y = [ z = (7<x<8) ] ]
|
375 |
+
|
376 |
+
Regression Tests
|
377 |
+
~~~~~~~~~~~~~~~~
|
378 |
+
|
379 |
+
Dictionary access methods (non-mutating)
|
380 |
+
----------------------------------------
|
381 |
+
|
382 |
+
>>> fs1 = FeatStruct(a=1, b=2, c=3)
|
383 |
+
>>> fs2 = FeatStruct(x=fs1, y='x')
|
384 |
+
|
385 |
+
Feature structures support all dictionary methods (excluding the class
|
386 |
+
method `dict.fromkeys()`). Non-mutating methods:
|
387 |
+
|
388 |
+
>>> sorted(fs2.keys()) # keys()
|
389 |
+
['x', 'y']
|
390 |
+
>>> sorted(fs2.values()) # values()
|
391 |
+
[[a=1, b=2, c=3], 'x']
|
392 |
+
>>> sorted(fs2.items()) # items()
|
393 |
+
[('x', [a=1, b=2, c=3]), ('y', 'x')]
|
394 |
+
>>> sorted(fs2) # __iter__()
|
395 |
+
['x', 'y']
|
396 |
+
>>> 'a' in fs2, 'x' in fs2 # __contains__()
|
397 |
+
(False, True)
|
398 |
+
>>> fs2.has_key('a'), fs2.has_key('x') # has_key()
|
399 |
+
(False, True)
|
400 |
+
>>> fs2['x'], fs2['y'] # __getitem__()
|
401 |
+
([a=1, b=2, c=3], 'x')
|
402 |
+
>>> fs2['a'] # __getitem__()
|
403 |
+
Traceback (most recent call last):
|
404 |
+
. . .
|
405 |
+
KeyError: 'a'
|
406 |
+
>>> fs2.get('x'), fs2.get('y'), fs2.get('a') # get()
|
407 |
+
([a=1, b=2, c=3], 'x', None)
|
408 |
+
>>> fs2.get('x', 'hello'), fs2.get('a', 'hello') # get()
|
409 |
+
([a=1, b=2, c=3], 'hello')
|
410 |
+
>>> len(fs1), len(fs2) # __len__
|
411 |
+
(3, 2)
|
412 |
+
>>> fs2.copy() # copy()
|
413 |
+
[x=[a=1, b=2, c=3], y='x']
|
414 |
+
>>> fs2.copy() is fs2 # copy()
|
415 |
+
False
|
416 |
+
|
417 |
+
Note: by default, `FeatStruct.copy()` does a deep copy. Use
|
418 |
+
`FeatStruct.copy(deep=False)` for a shallow copy.
|
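A sketch of the difference (assuming that, as with `dict.copy()`, a shallow
copy shares its child structures while a deep copy duplicates them):

| >>> fs = FeatStruct(child=FeatStruct(a=1))
| >>> fs.copy()['child'] is fs['child']              # deep: child duplicated
| False
| >>> fs.copy(deep=False)['child'] is fs['child']    # shallow: child shared
| True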
419 |
+
|
420 |
+
..
|
421 |
+
>>> del fs1, fs2 # clean-up.
|
422 |
+
|
423 |
+
Dictionary access methods (mutating)
|
424 |
+
------------------------------------
|
425 |
+
>>> fs1 = FeatStruct(a=1, b=2, c=3)
|
426 |
+
>>> fs2 = FeatStruct(x=fs1, y='x')
|
427 |
+
|
428 |
+
Setting features (`__setitem__()`)
|
429 |
+
|
430 |
+
>>> fs1['c'] = 5
|
431 |
+
>>> fs1
|
432 |
+
[a=1, b=2, c=5]
|
433 |
+
>>> fs1['x'] = 12
|
434 |
+
>>> fs1
|
435 |
+
[a=1, b=2, c=5, x=12]
|
436 |
+
>>> fs2['x', 'a'] = 2
|
437 |
+
>>> fs2
|
438 |
+
[x=[a=2, b=2, c=5, x=12], y='x']
|
439 |
+
>>> fs1
|
440 |
+
[a=2, b=2, c=5, x=12]
|
441 |
+
|
442 |
+
Deleting features (`__delitem__()`)
|
443 |
+
|
444 |
+
>>> del fs1['x']
|
445 |
+
>>> fs1
|
446 |
+
[a=2, b=2, c=5]
|
447 |
+
>>> del fs2['x', 'a']
|
448 |
+
>>> fs1
|
449 |
+
[b=2, c=5]
|
450 |
+
|
451 |
+
`setdefault()`:
|
452 |
+
|
453 |
+
>>> fs1.setdefault('b', 99)
|
454 |
+
2
|
455 |
+
>>> fs1
|
456 |
+
[b=2, c=5]
|
457 |
+
>>> fs1.setdefault('x', 99)
|
458 |
+
99
|
459 |
+
>>> fs1
|
460 |
+
[b=2, c=5, x=99]
|
461 |
+
|
462 |
+
`update()`:
|
463 |
+
|
464 |
+
>>> fs2.update({'a':'A', 'b':'B'}, c='C')
|
465 |
+
>>> fs2
|
466 |
+
[a='A', b='B', c='C', x=[b=2, c=5, x=99], y='x']
|
467 |
+
|
468 |
+
`pop()`:
|
469 |
+
|
470 |
+
>>> fs2.pop('a')
|
471 |
+
'A'
|
472 |
+
>>> fs2
|
473 |
+
[b='B', c='C', x=[b=2, c=5, x=99], y='x']
|
474 |
+
>>> fs2.pop('a')
|
475 |
+
Traceback (most recent call last):
|
476 |
+
. . .
|
477 |
+
KeyError: 'a'
|
478 |
+
>>> fs2.pop('a', 'foo')
|
479 |
+
'foo'
|
480 |
+
>>> fs2
|
481 |
+
[b='B', c='C', x=[b=2, c=5, x=99], y='x']
|
482 |
+
|
483 |
+
`clear()`:
|
484 |
+
|
485 |
+
>>> fs1.clear()
|
486 |
+
>>> fs1
|
487 |
+
[]
|
488 |
+
>>> fs2
|
489 |
+
[b='B', c='C', x=[], y='x']
|
490 |
+
|
491 |
+
`popitem()`:
|
492 |
+
|
493 |
+
>>> sorted([fs2.popitem() for i in range(len(fs2))])
|
494 |
+
[('b', 'B'), ('c', 'C'), ('x', []), ('y', 'x')]
|
495 |
+
>>> fs2
|
496 |
+
[]
|
497 |
+
|
498 |
+
Once a feature structure has been frozen, it may not be mutated.
|
499 |
+
|
500 |
+
>>> fs1 = FeatStruct('[x=1, y=2, z=[a=3]]')
|
501 |
+
>>> fs1.freeze()
|
502 |
+
>>> fs1.frozen()
|
503 |
+
True
|
504 |
+
>>> fs1['z'].frozen()
|
505 |
+
True
|
506 |
+
|
507 |
+
>>> fs1['x'] = 5
|
508 |
+
Traceback (most recent call last):
|
509 |
+
. . .
|
510 |
+
ValueError: Frozen FeatStructs may not be modified.
|
511 |
+
>>> del fs1['x']
|
512 |
+
Traceback (most recent call last):
|
513 |
+
. . .
|
514 |
+
ValueError: Frozen FeatStructs may not be modified.
|
515 |
+
>>> fs1.clear()
|
516 |
+
Traceback (most recent call last):
|
517 |
+
. . .
|
518 |
+
ValueError: Frozen FeatStructs may not be modified.
|
519 |
+
>>> fs1.pop('x')
|
520 |
+
Traceback (most recent call last):
|
521 |
+
. . .
|
522 |
+
ValueError: Frozen FeatStructs may not be modified.
|
523 |
+
>>> fs1.popitem()
|
524 |
+
Traceback (most recent call last):
|
525 |
+
. . .
|
526 |
+
ValueError: Frozen FeatStructs may not be modified.
|
527 |
+
>>> fs1.setdefault('x')
|
528 |
+
Traceback (most recent call last):
|
529 |
+
. . .
|
530 |
+
ValueError: Frozen FeatStructs may not be modified.
|
531 |
+
>>> fs1.update(z=22)
|
532 |
+
Traceback (most recent call last):
|
533 |
+
. . .
|
534 |
+
ValueError: Frozen FeatStructs may not be modified.
|
535 |
+
|
536 |
+
..
|
537 |
+
>>> del fs1, fs2 # clean-up.
|
538 |
+
|
539 |
+
Feature Paths
|
540 |
+
-------------
|
541 |
+
Make sure that __getitem__ with feature paths works as intended:
|
542 |
+
|
543 |
+
>>> fs1 = FeatStruct(a=1, b=2,
|
544 |
+
... c=FeatStruct(
|
545 |
+
... d=FeatStruct(e=12),
|
546 |
+
... f=FeatStruct(g=55, h='hello')))
|
547 |
+
>>> fs1[()]
|
548 |
+
[a=1, b=2, c=[d=[e=12], f=[g=55, h='hello']]]
|
549 |
+
>>> fs1['a'], fs1[('a',)]
|
550 |
+
(1, 1)
|
551 |
+
>>> fs1['c','d','e']
|
552 |
+
12
|
553 |
+
>>> fs1['c','f','g']
|
554 |
+
55
|
555 |
+
|
556 |
+
Feature paths that select unknown features raise KeyError:
|
557 |
+
|
558 |
+
>>> fs1['c', 'f', 'e']
|
559 |
+
Traceback (most recent call last):
|
560 |
+
. . .
|
561 |
+
KeyError: ('c', 'f', 'e')
|
562 |
+
>>> fs1['q', 'p']
|
563 |
+
Traceback (most recent call last):
|
564 |
+
. . .
|
565 |
+
KeyError: ('q', 'p')
|
566 |
+
|
567 |
+
Feature paths that try to go 'through' a feature that's not a feature
|
568 |
+
structure raise KeyError:
|
569 |
+
|
570 |
+
>>> fs1['a', 'b']
|
571 |
+
Traceback (most recent call last):
|
572 |
+
. . .
|
573 |
+
KeyError: ('a', 'b')
|
574 |
+
|
575 |
+
Feature paths can go through reentrant structures:
|
576 |
+
|
577 |
+
>>> fs2 = FeatStruct('(1)[a=[b=[c->(1), d=5], e=11]]')
|
578 |
+
>>> fs2['a', 'b', 'c', 'a', 'e']
|
579 |
+
11
|
580 |
+
>>> fs2['a', 'b', 'c', 'a', 'b', 'd']
|
581 |
+
5
|
582 |
+
>>> fs2[tuple('abcabcabcabcabcabcabcabcabcabca')]
|
583 |
+
(1)[b=[c=[a->(1)], d=5], e=11]
|
584 |
+
|
585 |
+
Indexing requires strings, `Feature`\s, or tuples; other types raise a
|
586 |
+
TypeError:
|
587 |
+
|
588 |
+
>>> fs2[12]
|
589 |
+
Traceback (most recent call last):
|
590 |
+
. . .
|
591 |
+
TypeError: Expected feature name or path. Got 12.
|
592 |
+
>>> fs2[list('abc')]
|
593 |
+
Traceback (most recent call last):
|
594 |
+
. . .
|
595 |
+
TypeError: Expected feature name or path. Got ['a', 'b', 'c'].
|
596 |
+
|
597 |
+
Feature paths can also be used with `get()`, `has_key()`, and
|
598 |
+
`__contains__()`.
|
599 |
+
|
600 |
+
>>> fpath1 = tuple('abcabc')
|
601 |
+
>>> fpath2 = tuple('abcabz')
|
602 |
+
>>> fs2.get(fpath1), fs2.get(fpath2)
|
603 |
+
((1)[a=[b=[c->(1), d=5], e=11]], None)
|
604 |
+
>>> fpath1 in fs2, fpath2 in fs2
|
605 |
+
(True, False)
|
606 |
+
>>> fs2.has_key(fpath1), fs2.has_key(fpath2)
|
607 |
+
(True, False)
|
608 |
+
|
609 |
+
..
|
610 |
+
>>> del fs1, fs2 # clean-up
|
611 |
+
|
612 |
+
Reading Feature Structures
|
613 |
+
--------------------------
|
614 |
+
|
615 |
+
Empty feature struct:
|
616 |
+
|
617 |
+
>>> FeatStruct('[]')
|
618 |
+
[]
|
619 |
+
|
620 |
+
Test features with integer values:
|
621 |
+
|
622 |
+
>>> FeatStruct('[a=12, b=-33, c=0]')
|
623 |
+
[a=12, b=-33, c=0]
|
624 |
+
|
625 |
+
Test features with string values. Either single or double quotes may
|
626 |
+
be used. Strings are evaluated just like Python strings -- in
|
627 |
+
particular, you can use escape sequences and 'u' and 'r' prefixes, and
|
628 |
+
triple-quoted strings.
|
629 |
+
|
630 |
+
>>> FeatStruct('[a="", b="hello", c="\'", d=\'\', e=\'"\']')
|
631 |
+
[a='', b='hello', c="'", d='', e='"']
|
632 |
+
>>> FeatStruct(r'[a="\\", b="\"", c="\x6f\\y", d="12"]')
|
633 |
+
[a='\\', b='"', c='o\\y', d='12']
|
634 |
+
>>> FeatStruct(r'[b=r"a\b\c"]')
|
635 |
+
[b='a\\b\\c']
|
636 |
+
>>> FeatStruct('[x="""a"""]')
|
637 |
+
[x='a']
|
638 |
+
|
639 |
+
Test parsing of reentrant feature structures.
|
640 |
+
|
641 |
+
>>> FeatStruct('[a=(1)[], b->(1)]')
|
642 |
+
[a=(1)[], b->(1)]
|
643 |
+
>>> FeatStruct('[a=(1)[x=1, y=2], b->(1)]')
|
644 |
+
[a=(1)[x=1, y=2], b->(1)]
|
645 |
+
|
646 |
+
Test parsing of cyclic feature structures.
|
647 |
+
|
648 |
+
>>> FeatStruct('[a=(1)[b->(1)]]')
|
649 |
+
[a=(1)[b->(1)]]
|
650 |
+
>>> FeatStruct('(1)[a=[b=[c->(1)]]]')
|
651 |
+
(1)[a=[b=[c->(1)]]]
|
652 |
+
|
653 |
+
Strings of the form "+name" and "-name" may be used to specify boolean
|
654 |
+
values.
|
655 |
+
|
656 |
+
>>> FeatStruct('[-bar, +baz, +foo]')
|
657 |
+
[-bar, +baz, +foo]
|
658 |
+
|
659 |
+
None, True, and False are recognized as values:
|
660 |
+
|
661 |
+
>>> FeatStruct('[bar=True, baz=False, foo=None]')
|
662 |
+
[+bar, -baz, foo=None]
|
663 |
+
|
664 |
+
Special features:
|
665 |
+
|
666 |
+
>>> FeatStruct('NP/VP')
|
667 |
+
NP[]/VP[]
|
668 |
+
>>> FeatStruct('?x/?x')
|
669 |
+
?x[]/?x[]
|
670 |
+
>>> print(FeatStruct('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]'))
|
671 |
+
[ *type* = 'VP' ]
|
672 |
+
[ ]
|
673 |
+
[ [ *type* = 'NP' ] ]
|
674 |
+
[ *slash* = [ agr = ?x ] ]
|
675 |
+
[ [ pl = True ] ]
|
676 |
+
[ ]
|
677 |
+
[ agr = ?x ]
|
678 |
+
[ fin = True ]
|
679 |
+
[ tense = 'past' ]
|
680 |
+
|
681 |
+
Here the slash feature gets coerced:
|
682 |
+
|
683 |
+
>>> FeatStruct('[*slash*=a, x=b, *type*="NP"]')
|
684 |
+
NP[x='b']/a[]
|
685 |
+
|
686 |
+
>>> FeatStruct('NP[sem=<bob>]/NP')
|
687 |
+
NP[sem=<bob>]/NP[]
|
688 |
+
>>> FeatStruct('S[sem=<walk(bob)>]')
|
689 |
+
S[sem=<walk(bob)>]
|
690 |
+
>>> print(FeatStruct('NP[sem=<bob>]/NP'))
|
691 |
+
[ *type* = 'NP' ]
|
692 |
+
[ ]
|
693 |
+
[ *slash* = [ *type* = 'NP' ] ]
|
694 |
+
[ ]
|
695 |
+
[ sem = <bob> ]
|
696 |
+
|
697 |
+
Playing with ranges:
|
698 |
+
|
699 |
+
>>> from nltk.featstruct import RangeFeature, FeatStructReader
|
700 |
+
>>> width = RangeFeature('width')
|
701 |
+
>>> reader = FeatStructReader([width])
|
702 |
+
>>> fs1 = reader.fromstring('[*width*=-5:12]')
|
703 |
+
>>> fs2 = reader.fromstring('[*width*=2:123]')
|
704 |
+
>>> fs3 = reader.fromstring('[*width*=-7:-2]')
|
705 |
+
>>> fs1.unify(fs2)
|
706 |
+
[*width*=(2, 12)]
|
707 |
+
>>> fs1.unify(fs3)
|
708 |
+
[*width*=(-5, -2)]
|
709 |
+
>>> print(fs2.unify(fs3)) # no overlap in width.
|
710 |
+
None
|
711 |
+
|
712 |
+
The slash feature has a default value of 'False':
|
713 |
+
|
714 |
+
>>> print(FeatStruct('NP[]/VP').unify(FeatStruct('NP[]'), trace=1))
|
715 |
+
<BLANKLINE>
|
716 |
+
Unification trace:
|
717 |
+
/ NP[]/VP[]
|
718 |
+
|\ NP[]
|
719 |
+
|
|
720 |
+
| Unify feature: *type*
|
721 |
+
| / 'NP'
|
722 |
+
| |\ 'NP'
|
723 |
+
| |
|
724 |
+
| +-->'NP'
|
725 |
+
|
|
726 |
+
| Unify feature: *slash*
|
727 |
+
| / VP[]
|
728 |
+
| |\ False
|
729 |
+
| |
|
730 |
+
X X <-- FAIL
|
731 |
+
None
|
732 |
+
|
733 |
+
The demo structures from category.py. They all parse, but they don't
|
734 |
+
do quite the right thing -- ?x vs x.
|
735 |
+
|
736 |
+
>>> FeatStruct(pos='n', agr=FeatStruct(number='pl', gender='f'))
|
737 |
+
[agr=[gender='f', number='pl'], pos='n']
|
738 |
+
>>> FeatStruct(r'NP[sem=<bob>]/NP')
|
739 |
+
NP[sem=<bob>]/NP[]
|
740 |
+
>>> FeatStruct(r'S[sem=<app(?x, ?y)>]')
|
741 |
+
S[sem=<?x(?y)>]
|
742 |
+
>>> FeatStruct('?x/?x')
|
743 |
+
?x[]/?x[]
|
744 |
+
>>> FeatStruct('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]')
|
745 |
+
VP[agr=?x, +fin, tense='past']/NP[agr=?x, +pl]
|
746 |
+
>>> FeatStruct('S[sem = <app(?subj, ?vp)>]')
|
747 |
+
S[sem=<?subj(?vp)>]
|
748 |
+
|
749 |
+
>>> FeatStruct('S')
|
750 |
+
S[]
|
751 |
+
|
752 |
+
The parser also includes support for reading sets and tuples.
|
753 |
+
|
754 |
+
>>> FeatStruct('[x={1,2,2,2}, y={/}]')
|
755 |
+
[x={1, 2}, y={/}]
|
756 |
+
>>> FeatStruct('[x=(1,2,2,2), y=()]')
|
757 |
+
[x=(1, 2, 2, 2), y=()]
|
758 |
+
>>> print(FeatStruct('[x=(1,[z=(1,2,?x)],?z,{/})]'))
|
759 |
+
[ x = (1, [ z = (1, 2, ?x) ], ?z, {/}) ]
|
760 |
+
|
761 |
+
Note that we can't put a featstruct inside a set, because doing so
|
762 |
+
would hash it, and it's not frozen yet:
|
763 |
+
|
764 |
+
>>> print(FeatStruct('[x={[]}]'))
|
765 |
+
Traceback (most recent call last):
|
766 |
+
. . .
|
767 |
+
TypeError: FeatStructs must be frozen before they can be hashed.
|
768 |
+
|
769 |
+
There's a special syntax for taking the union of sets: "{...+...}".
|
770 |
+
The elements should only be variables or sets.
|
771 |
+
|
772 |
+
>>> FeatStruct('[x={?a+?b+{1,2,3}}]')
|
773 |
+
[x={?a+?b+{1, 2, 3}}]
|
774 |
+
|
775 |
+
There's a special syntax for taking the concatenation of tuples:
|
776 |
+
"(...+...)". The elements should only be variables or tuples.
|
777 |
+
|
778 |
+
>>> FeatStruct('[x=(?a+?b+(1,2,3))]')
|
779 |
+
[x=(?a+?b+(1, 2, 3))]
|
780 |
+
|
781 |
+
Parsing gives helpful messages if your string contains an error.
|
782 |
+
|
783 |
+
>>> FeatStruct('[a=, b=5]]')
|
784 |
+
Traceback (most recent call last):
|
785 |
+
. . .
|
786 |
+
ValueError: Error parsing feature structure
|
787 |
+
[a=, b=5]]
|
788 |
+
^ Expected value
|
789 |
+
>>> FeatStruct('[a=12 22, b=33]')
|
790 |
+
Traceback (most recent call last):
|
791 |
+
. . .
|
792 |
+
ValueError: Error parsing feature structure
|
793 |
+
[a=12 22, b=33]
|
794 |
+
^ Expected comma
|
795 |
+
>>> FeatStruct('[a=5] [b=6]')
|
796 |
+
Traceback (most recent call last):
|
797 |
+
. . .
|
798 |
+
ValueError: Error parsing feature structure
|
799 |
+
[a=5] [b=6]
|
800 |
+
^ Expected end of string
|
801 |
+
>>> FeatStruct(' *++*')
|
802 |
+
Traceback (most recent call last):
|
803 |
+
. . .
|
804 |
+
ValueError: Error parsing feature structure
|
805 |
+
*++*
|
806 |
+
^ Expected open bracket or identifier
|
807 |
+
>>> FeatStruct('[x->(1)]')
|
808 |
+
Traceback (most recent call last):
|
809 |
+
. . .
|
810 |
+
ValueError: Error parsing feature structure
|
811 |
+
[x->(1)]
|
812 |
+
^ Expected bound identifier
|
813 |
+
>>> FeatStruct('[x->y]')
|
814 |
+
Traceback (most recent call last):
|
815 |
+
. . .
|
816 |
+
ValueError: Error parsing feature structure
|
817 |
+
[x->y]
|
818 |
+
^ Expected identifier
|
819 |
+
>>> FeatStruct('')
|
820 |
+
Traceback (most recent call last):
|
821 |
+
. . .
|
822 |
+
ValueError: Error parsing feature structure
|
823 |
+
<BLANKLINE>
|
824 |
+
^ Expected open bracket or identifier
|
825 |
+
|
826 |
+
|
827 |
+
Unification
|
828 |
+
-----------
|
829 |
+
Very simple unifications give the expected results:
|
830 |
+
|
831 |
+
>>> FeatStruct().unify(FeatStruct())
|
832 |
+
[]
|
833 |
+
>>> FeatStruct(number='singular').unify(FeatStruct())
|
834 |
+
[number='singular']
|
835 |
+
>>> FeatStruct().unify(FeatStruct(number='singular'))
|
836 |
+
[number='singular']
|
837 |
+
>>> FeatStruct(number='singular').unify(FeatStruct(person=3))
|
838 |
+
[number='singular', person=3]
|
839 |
+
|
840 |
+
Merging nested structures:
|
841 |
+
|
842 |
+
>>> fs1 = FeatStruct('[A=[B=b]]')
|
843 |
+
>>> fs2 = FeatStruct('[A=[C=c]]')
|
844 |
+
>>> fs1.unify(fs2)
|
845 |
+
[A=[B='b', C='c']]
|
846 |
+
>>> fs2.unify(fs1)
|
847 |
+
[A=[B='b', C='c']]
|
848 |
+
|
849 |
+
A basic case of reentrant unification
|
850 |
+
|
851 |
+
>>> fs4 = FeatStruct('[A=(1)[B=b], E=[F->(1)]]')
|
852 |
+
>>> fs5 = FeatStruct("[A=[C='c'], E=[F=[D='d']]]")
|
853 |
+
>>> fs4.unify(fs5)
|
854 |
+
[A=(1)[B='b', C='c', D='d'], E=[F->(1)]]
|
855 |
+
>>> fs5.unify(fs4)
|
856 |
+
[A=(1)[B='b', C='c', D='d'], E=[F->(1)]]
|
857 |
+
|
858 |
+
More than 2 paths to a value
|
859 |
+
|
860 |
+
>>> fs1 = FeatStruct("[a=[],b=[],c=[],d=[]]")
|
861 |
+
>>> fs2 = FeatStruct('[a=(1)[], b->(1), c->(1), d->(1)]')
|
862 |
+
>>> fs1.unify(fs2)
|
863 |
+
[a=(1)[], b->(1), c->(1), d->(1)]
|
864 |
+
|
865 |
+
fs1[a] gets unified with itself
|
866 |
+
|
867 |
+
>>> fs1 = FeatStruct('[x=(1)[], y->(1)]')
|
868 |
+
>>> fs2 = FeatStruct('[x=(1)[], y->(1)]')
|
869 |
+
>>> fs1.unify(fs2)
|
870 |
+
[x=(1)[], y->(1)]
|
871 |
+
|
872 |
+
Bound variables should get forwarded appropriately
|
873 |
+
|
874 |
+
>>> fs1 = FeatStruct('[A=(1)[X=x], B->(1), C=?cvar, D=?dvar]')
|
875 |
+
>>> fs2 = FeatStruct('[A=(1)[Y=y], B=(2)[Z=z], C->(1), D->(2)]')
|
876 |
+
>>> fs1.unify(fs2)
|
877 |
+
[A=(1)[X='x', Y='y', Z='z'], B->(1), C->(1), D->(1)]
|
878 |
+
>>> fs2.unify(fs1)
|
879 |
+
[A=(1)[X='x', Y='y', Z='z'], B->(1), C->(1), D->(1)]
|
880 |
+
|
881 |
+
Cyclic structure created by unification.
|
882 |
+
|
883 |
+
>>> fs1 = FeatStruct('[F=(1)[], G->(1)]')
|
884 |
+
>>> fs2 = FeatStruct('[F=[H=(2)[]], G->(2)]')
|
885 |
+
>>> fs3 = fs1.unify(fs2)
|
886 |
+
>>> fs3
|
887 |
+
[F=(1)[H->(1)], G->(1)]
|
888 |
+
>>> fs3['F'] is fs3['G']
|
889 |
+
True
|
890 |
+
>>> fs3['F'] is fs3['G']['H']
|
891 |
+
True
|
892 |
+
>>> fs3['F'] is fs3['G']['H']['H']
|
893 |
+
True
|
894 |
+
>>> fs3['F'] is fs3['F']['H']['H']['H']['H']['H']['H']['H']['H']
|
895 |
+
True
|
896 |
+
|
897 |
+
Cyclic structure created w/ variables.
|
898 |
+
|
899 |
+
>>> fs1 = FeatStruct('[F=[H=?x]]')
|
900 |
+
>>> fs2 = FeatStruct('[F=?x]')
|
901 |
+
>>> fs3 = fs1.unify(fs2, rename_vars=False)
|
902 |
+
>>> fs3
|
903 |
+
[F=(1)[H->(1)]]
|
904 |
+
>>> fs3['F'] is fs3['F']['H']
|
905 |
+
True
|
906 |
+
>>> fs3['F'] is fs3['F']['H']['H']
|
907 |
+
True
|
908 |
+
>>> fs3['F'] is fs3['F']['H']['H']['H']['H']['H']['H']['H']['H']
|
909 |
+
True
|
910 |
+
|
911 |
+
Unifying w/ a cyclic feature structure.
|
912 |
+
|
913 |
+
>>> fs4 = FeatStruct('[F=[H=[H=[H=(1)[]]]], K->(1)]')
|
914 |
+
>>> fs3.unify(fs4)
|
915 |
+
[F=(1)[H->(1)], K->(1)]
|
916 |
+
>>> fs4.unify(fs3)
|
917 |
+
[F=(1)[H->(1)], K->(1)]
|
918 |
+
|
919 |
+
Variable bindings should preserve reentrance.
|
920 |
+
|
921 |
+
>>> bindings = {}
|
922 |
+
>>> fs1 = FeatStruct("[a=?x]")
|
923 |
+
>>> fs2 = fs1.unify(FeatStruct("[a=[]]"), bindings)
|
924 |
+
>>> fs2['a'] is bindings[Variable('?x')]
|
925 |
+
True
|
926 |
+
>>> fs2.unify(FeatStruct("[b=?x]"), bindings)
|
927 |
+
[a=(1)[], b->(1)]
|
928 |
+
|
929 |
+
Aliased variable tests
|
930 |
+
|
931 |
+
>>> fs1 = FeatStruct("[a=?x, b=?x]")
|
932 |
+
>>> fs2 = FeatStruct("[b=?y, c=?y]")
|
933 |
+
>>> bindings = {}
|
934 |
+
>>> fs3 = fs1.unify(fs2, bindings)
|
935 |
+
>>> fs3
|
936 |
+
[a=?x, b=?x, c=?x]
|
937 |
+
>>> bindings
|
938 |
+
{Variable('?y'): Variable('?x')}
|
939 |
+
>>> fs3.unify(FeatStruct("[a=1]"))
|
940 |
+
[a=1, b=1, c=1]
|
941 |
+
|
942 |
+
If we keep track of the bindings, then we can use the same variable
|
943 |
+
over multiple calls to unify.
|
944 |
+
|
945 |
+
>>> bindings = {}
|
946 |
+
>>> fs1 = FeatStruct('[a=?x]')
|
947 |
+
>>> fs2 = fs1.unify(FeatStruct('[a=[]]'), bindings)
|
948 |
+
>>> fs2.unify(FeatStruct('[b=?x]'), bindings)
|
949 |
+
[a=(1)[], b->(1)]
|
950 |
+
>>> bindings
|
951 |
+
{Variable('?x'): []}
|
952 |
+
|
953 |
+
..
|
954 |
+
>>> del fs1, fs2, fs3, fs4, fs5 # clean-up
|
955 |
+
|
956 |
+
Unification Bindings
|
957 |
+
--------------------
|
958 |
+
|
959 |
+
>>> bindings = {}
|
960 |
+
>>> fs1 = FeatStruct('[a=?x]')
|
961 |
+
>>> fs2 = FeatStruct('[a=12]')
|
962 |
+
>>> fs3 = FeatStruct('[b=?x]')
|
963 |
+
>>> fs1.unify(fs2, bindings)
|
964 |
+
[a=12]
|
965 |
+
>>> bindings
|
966 |
+
{Variable('?x'): 12}
|
967 |
+
>>> fs3.substitute_bindings(bindings)
|
968 |
+
[b=12]
|
969 |
+
>>> fs3 # substitute_bindings didn't mutate fs3.
|
970 |
+
[b=?x]
|
971 |
+
>>> fs2.unify(fs3, bindings)
|
972 |
+
[a=12, b=12]
|
973 |
+
|
974 |
+
>>> bindings = {}
|
975 |
+
>>> fs1 = FeatStruct('[a=?x, b=1]')
|
976 |
+
>>> fs2 = FeatStruct('[a=5, b=?x]')
|
977 |
+
>>> fs1.unify(fs2, bindings)
|
978 |
+
[a=5, b=1]
|
979 |
+
>>> sorted(bindings.items())
|
980 |
+
[(Variable('?x'), 5), (Variable('?x2'), 1)]
|
981 |
+
|
982 |
+
..
|
983 |
+
>>> del fs1, fs2, fs3 # clean-up
|
984 |
+
|
985 |
+
Expressions
|
986 |
+
-----------
|
987 |
+
|
988 |
+
>>> e = Expression.fromstring('\\P y.P(z,y)')
|
989 |
+
>>> fs1 = FeatStruct(x=e, y=Variable('z'))
|
990 |
+
>>> fs2 = FeatStruct(y=VariableExpression(Variable('John')))
|
991 |
+
>>> fs1.unify(fs2)
|
992 |
+
[x=<\P y.P(John,y)>, y=<John>]
|
993 |
+
|
994 |
+
Remove Variables
|
995 |
+
----------------
|
996 |
+
|
997 |
+
>>> FeatStruct('[a=?x, b=12, c=[d=?y]]').remove_variables()
|
998 |
+
[b=12, c=[]]
|
999 |
+
>>> FeatStruct('(1)[a=[b=?x,c->(1)]]').remove_variables()
|
1000 |
+
(1)[a=[c->(1)]]
|
1001 |
+
|
1002 |
+
Equality & Hashing
|
1003 |
+
------------------
|
1004 |
+
The `equal_values` method checks whether two feature structures assign
|
1005 |
+
the same value to every feature. If the optional argument
|
1006 |
+
``check_reentrances`` is supplied, then it also returns false if there
|
1007 |
+
is any difference in the reentrances.
|
1008 |
+
|
1009 |
+
>>> a = FeatStruct('(1)[x->(1)]')
|
1010 |
+
>>> b = FeatStruct('(1)[x->(1)]')
|
1011 |
+
>>> c = FeatStruct('(1)[x=[x->(1)]]')
|
1012 |
+
>>> d = FeatStruct('[x=(1)[x->(1)]]')
|
1013 |
+
>>> e = FeatStruct('(1)[x=[x->(1), y=1], y=1]')
|
1014 |
+
>>> def compare(x,y):
|
1015 |
+
... assert x.equal_values(y, True) == y.equal_values(x, True)
|
1016 |
+
... assert x.equal_values(y, False) == y.equal_values(x, False)
|
1017 |
+
... if x.equal_values(y, True):
|
1018 |
+
... assert x.equal_values(y, False)
|
1019 |
+
... print('equal values, same reentrance')
|
1020 |
+
... elif x.equal_values(y, False):
|
1021 |
+
... print('equal values, different reentrance')
|
1022 |
+
... else:
|
1023 |
+
... print('different values')
|
1024 |
+
|
1025 |
+
>>> compare(a, a)
|
1026 |
+
equal values, same reentrance
|
1027 |
+
>>> compare(a, b)
|
1028 |
+
equal values, same reentrance
|
1029 |
+
>>> compare(a, c)
|
1030 |
+
equal values, different reentrance
|
1031 |
+
>>> compare(a, d)
|
1032 |
+
equal values, different reentrance
|
1033 |
+
>>> compare(c, d)
|
1034 |
+
equal values, different reentrance
|
1035 |
+
>>> compare(a, e)
|
1036 |
+
different values
|
1037 |
+
>>> compare(c, e)
|
1038 |
+
different values
|
1039 |
+
>>> compare(d, e)
|
1040 |
+
different values
|
1041 |
+
>>> compare(e, e)
|
1042 |
+
equal values, same reentrance
|
1043 |
+
|
1044 |
+
Feature structures may not be hashed until they are frozen:
|
1045 |
+
|
1046 |
+
>>> hash(a)
|
1047 |
+
Traceback (most recent call last):
|
1048 |
+
. . .
|
1049 |
+
TypeError: FeatStructs must be frozen before they can be hashed.
|
1050 |
+
>>> a.freeze()
|
1051 |
+
>>> v = hash(a)
|
1052 |
+
|
1053 |
+
Feature structures define hash consistently. The following example
|
1054 |
+
looks at the hash value for each (fs1,fs2) pair; if their hash values
|
1055 |
+
are not equal, then they must not be equal. If their hash values are
|
1056 |
+
equal, then display a message, and indicate whether their values are
|
1057 |
+
indeed equal. Note that c and d currently have the same hash value,
|
1058 |
+
even though they are not equal. That is not a bug, strictly speaking,
|
1059 |
+
but it wouldn't be a bad thing if it changed.
|
1060 |
+
|
1061 |
+
>>> for fstruct in (a, b, c, d, e):
|
1062 |
+
... fstruct.freeze()
|
1063 |
+
>>> for fs1_name in 'abcde':
|
1064 |
+
... for fs2_name in 'abcde':
|
1065 |
+
... fs1 = locals()[fs1_name]
|
1066 |
+
... fs2 = locals()[fs2_name]
|
1067 |
+
... if hash(fs1) != hash(fs2):
|
1068 |
+
... assert fs1 != fs2
|
1069 |
+
... else:
|
1070 |
+
... print('%s and %s have the same hash value,' %
|
1071 |
+
... (fs1_name, fs2_name))
|
1072 |
+
... if fs1 == fs2: print('and are equal')
|
1073 |
+
... else: print('and are not equal')
|
1074 |
+
a and a have the same hash value, and are equal
|
1075 |
+
a and b have the same hash value, and are equal
|
1076 |
+
b and a have the same hash value, and are equal
|
1077 |
+
b and b have the same hash value, and are equal
|
1078 |
+
c and c have the same hash value, and are equal
|
1079 |
+
c and d have the same hash value, and are not equal
|
1080 |
+
d and c have the same hash value, and are not equal
|
1081 |
+
d and d have the same hash value, and are equal
|
1082 |
+
e and e have the same hash value, and are equal
|
1083 |
+
|
1084 |
+
..
|
1085 |
+
>>> del a, b, c, d, e, v # clean-up
|
1086 |
+
|
1087 |
+
Tracing
|
1088 |
+
-------
|
1089 |
+
|
1090 |
+
>>> fs1 = FeatStruct('[a=[b=(1)[], c=?x], d->(1), e=[f=?x]]')
|
1091 |
+
>>> fs2 = FeatStruct('[a=(1)[c="C"], e=[g->(1)]]')
|
1092 |
+
>>> fs1.unify(fs2, trace=True)
|
1093 |
+
<BLANKLINE>
|
1094 |
+
Unification trace:
|
1095 |
+
/ [a=[b=(1)[], c=?x], d->(1), e=[f=?x]]
|
1096 |
+
|\ [a=(1)[c='C'], e=[g->(1)]]
|
1097 |
+
|
|
1098 |
+
| Unify feature: a
|
1099 |
+
| / [b=[], c=?x]
|
1100 |
+
| |\ [c='C']
|
1101 |
+
| |
|
1102 |
+
| | Unify feature: a.c
|
1103 |
+
| | / ?x
|
1104 |
+
| | |\ 'C'
|
1105 |
+
| | |
|
1106 |
+
| | +-->Variable('?x')
|
1107 |
+
| |
|
1108 |
+
| +-->[b=[], c=?x]
|
1109 |
+
| Bindings: {?x: 'C'}
|
1110 |
+
|
|
1111 |
+
| Unify feature: e
|
1112 |
+
| / [f=?x]
|
1113 |
+
| |\ [g=[c='C']]
|
1114 |
+
| |
|
1115 |
+
| +-->[f=?x, g=[b=[], c=?x]]
|
1116 |
+
| Bindings: {?x: 'C'}
|
1117 |
+
|
|
1118 |
+
+-->[a=(1)[b=(2)[], c='C'], d->(2), e=[f='C', g->(1)]]
|
1119 |
+
Bindings: {?x: 'C'}
|
1120 |
+
[a=(1)[b=(2)[], c='C'], d->(2), e=[f='C', g->(1)]]
|
1121 |
+
>>>
|
1122 |
+
>>> fs1 = FeatStruct('[a=?x, b=?z, c=?z]')
|
1123 |
+
>>> fs2 = FeatStruct('[a=?y, b=?y, c=?q]')
|
1124 |
+
>>> #fs1.unify(fs2, trace=True)
|
1125 |
+
>>>
|
1126 |
+
|
1127 |
+
..
|
1128 |
+
>>> del fs1, fs2 # clean-up
|
1129 |
+
|
1130 |
+
Unification on Dicts & Lists
|
1131 |
+
----------------------------
|
1132 |
+
It's possible to do unification on dictionaries:
|
1133 |
+
|
1134 |
+
>>> from nltk.featstruct import unify
|
1135 |
+
>>> pprint(unify(dict(x=1, y=dict(z=2)), dict(x=1, q=5)), width=1)
|
1136 |
+
{'q': 5, 'x': 1, 'y': {'z': 2}}
|
1137 |
+
|
1138 |
+
It's possible to do unification on lists as well:
|
1139 |
+
|
1140 |
+
>>> unify([1, 2, 3], [1, Variable('x'), 3])
|
1141 |
+
[1, 2, 3]
|
1142 |
+
|
1143 |
+
Mixing dicts and lists is fine:
|
1144 |
+
|
1145 |
+
>>> pprint(unify([dict(x=1, y=dict(z=2)),3], [dict(x=1, q=5),3]),
|
1146 |
+
... width=1)
|
1147 |
+
[{'q': 5, 'x': 1, 'y': {'z': 2}}, 3]
|
1148 |
+
|
1149 |
+
Mixing dicts and FeatStructs is discouraged:
|
1150 |
+
|
1151 |
+
>>> unify(dict(x=1), FeatStruct(x=1))
|
1152 |
+
Traceback (most recent call last):
|
1153 |
+
. . .
|
1154 |
+
ValueError: Mixing FeatStruct objects with Python dicts and lists is not supported.
|
1155 |
+
|
1156 |
+
But you can do it if you really want, by explicitly stating that both
|
1157 |
+
dictionaries and FeatStructs should be treated as feature structures:
|
1158 |
+
|
1159 |
+
>>> unify(dict(x=1), FeatStruct(x=1), fs_class=(dict, FeatStruct))
|
1160 |
+
{'x': 1}
|
1161 |
+
|
1162 |
+
Finding Conflicts
|
1163 |
+
-----------------
|
1164 |
+
|
1165 |
+
>>> from nltk.featstruct import conflicts
|
1166 |
+
>>> fs1 = FeatStruct('[a=[b=(1)[c=2], d->(1), e=[f->(1)]]]')
|
1167 |
+
>>> fs2 = FeatStruct('[a=[b=[c=[x=5]], d=[c=2], e=[f=[c=3]]]]')
|
1168 |
+
>>> for path in conflicts(fs1, fs2):
|
1169 |
+
... print('%-8s: %r vs %r' % ('.'.join(path), fs1[path], fs2[path]))
|
1170 |
+
a.b.c : 2 vs [x=5]
|
1171 |
+
a.e.f.c : 2 vs 3
|
1172 |
+
|
1173 |
+
..
|
1174 |
+
>>> del fs1, fs2 # clean-up
|
1175 |
+
|
1176 |
+
Retracting Bindings
|
1177 |
+
-------------------
|
1178 |
+
|
1179 |
+
>>> from nltk.featstruct import retract_bindings
|
1180 |
+
>>> bindings = {}
|
1181 |
+
>>> fs1 = FeatStruct('[a=?x, b=[c=?y]]')
|
1182 |
+
>>> fs2 = FeatStruct('[a=(1)[c=[d=1]], b->(1)]')
|
1183 |
+
>>> fs3 = fs1.unify(fs2, bindings)
|
1184 |
+
>>> print(fs3)
|
1185 |
+
[ a = (1) [ c = [ d = 1 ] ] ]
|
1186 |
+
[ ]
|
1187 |
+
[ b -> (1) ]
|
1188 |
+
>>> pprint(bindings)
|
1189 |
+
{Variable('?x'): [c=[d=1]], Variable('?y'): [d=1]}
|
1190 |
+
>>> retract_bindings(fs3, bindings)
|
1191 |
+
[a=?x, b=?x]
|
1192 |
+
>>> pprint(bindings)
|
1193 |
+
{Variable('?x'): [c=?y], Variable('?y'): [d=1]}
|
1194 |
+
|
1195 |
+
Squashed Bugs
|
1196 |
+
~~~~~~~~~~~~~
|
1197 |
+
In svn rev 5167, unifying two feature structures that used the same
|
1198 |
+
variable would cause those variables to become aliased in the output.
|
1199 |
+
|
1200 |
+
>>> fs1 = FeatStruct('[a=?x]')
|
1201 |
+
>>> fs2 = FeatStruct('[b=?x]')
|
1202 |
+
>>> fs1.unify(fs2)
|
1203 |
+
[a=?x, b=?x2]
|
1204 |
+
|
1205 |
+
There was a bug in svn revision 5172 that caused `rename_variables` to
|
1206 |
+
rename variables to names that are already used.
|
1207 |
+
|
1208 |
+
>>> FeatStruct('[a=?x, b=?x2]').rename_variables(
|
1209 |
+
... vars=[Variable('?x')])
|
1210 |
+
[a=?x3, b=?x2]
|
1211 |
+
>>> fs1 = FeatStruct('[a=?x]')
|
1212 |
+
>>> fs2 = FeatStruct('[a=?x, b=?x2]')
|
1213 |
+
>>> fs1.unify(fs2)
|
1214 |
+
[a=?x, b=?x2]
|
1215 |
+
|
1216 |
+
There was a bug in svn rev 5167 that caused us to get the following
|
1217 |
+
example wrong. Basically the problem was that we only followed
|
1218 |
+
'forward' pointers for other, not self, when unifying two feature
|
1219 |
+
structures. (nb: this test assumes that features are unified in
|
1220 |
+
alphabetical order -- if they are not, it might pass even if the bug
|
1221 |
+
is present.)
|
1222 |
+
|
1223 |
+
>>> fs1 = FeatStruct('[a=[x=1], b=?x, c=?x]')
|
1224 |
+
>>> fs2 = FeatStruct('[a=(1)[], b->(1), c=[x=2]]')
|
1225 |
+
>>> print(fs1.unify(fs2))
|
1226 |
+
None
|
1227 |
+
|
1228 |
+
..
|
1229 |
+
>>> del fs1, fs2 # clean-up
|
venv/lib/python3.10/site-packages/nltk/test/framenet.doctest
ADDED
@@ -0,0 +1,288 @@
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
========
|
5 |
+
FrameNet
|
6 |
+
========
|
7 |
+
|
8 |
+
The FrameNet corpus is a lexical database of English that is both human-
|
9 |
+
and machine-readable, based on annotating examples of how words are used
|
10 |
+
in actual texts. FrameNet is based on a theory of meaning called Frame
|
11 |
+
Semantics, deriving from the work of Charles J. Fillmore and colleagues.
|
12 |
+
The basic idea is straightforward: that the meanings of most words can
|
13 |
+
best be understood on the basis of a semantic frame: a description of a
|
14 |
+
type of event, relation, or entity and the participants in it. For
|
15 |
+
example, the concept of cooking typically involves a person doing the
|
16 |
+
cooking (Cook), the food that is to be cooked (Food), something to hold
|
17 |
+
the food while cooking (Container) and a source of heat
|
18 |
+
(Heating_instrument). In the FrameNet project, this is represented as a
|
19 |
+
frame called Apply_heat, and the Cook, Food, Heating_instrument and
|
20 |
+
Container are called frame elements (FEs). Words that evoke this frame,
|
21 |
+
such as fry, bake, boil, and broil, are called lexical units (LUs) of
|
22 |
+
the Apply_heat frame. The job of FrameNet is to define the frames
|
23 |
+
and to annotate sentences to show how the FEs fit syntactically around
|
24 |
+
the word that evokes the frame.
|
25 |
+
|
26 |
+
------
|
27 |
+
Frames
|
28 |
+
------
|
29 |
+
|
30 |
+
A Frame is a script-like conceptual structure that describes a
|
31 |
+
particular type of situation, object, or event along with the
|
32 |
+
participants and props that are needed for that Frame. For
|
33 |
+
example, the "Apply_heat" frame describes a common situation
|
34 |
+
involving a Cook, some Food, and a Heating_Instrument, and is
|
35 |
+
evoked by words such as bake, blanch, boil, broil, brown,
|
36 |
+
simmer, steam, etc.
|
37 |
+
|
38 |
+
We call the roles of a Frame "frame elements" (FEs) and the
|
39 |
+
frame-evoking words are called "lexical units" (LUs).
|
40 |
+
|
41 |
+
FrameNet includes relations between Frames. Several types of
|
42 |
+
relations are defined, of which the most important are:
|
43 |
+
|
44 |
+
- Inheritance: An IS-A relation. The child frame is a subtype
|
45 |
+
of the parent frame, and each FE in the parent is bound to
|
46 |
+
a corresponding FE in the child. An example is the
|
47 |
+
"Revenge" frame which inherits from the
|
48 |
+
"Rewards_and_punishments" frame.
|
49 |
+
|
50 |
+
- Using: The child frame presupposes the parent frame as
|
51 |
+
background, e.g. the "Speed" frame "uses" (or presupposes)
|
52 |
+
the "Motion" frame; however, not all parent FEs need to be
|
53 |
+
bound to child FEs.
|
54 |
+
|
55 |
+
- Subframe: The child frame is a subevent of a complex event
|
56 |
+
represented by the parent, e.g. the "Criminal_process" frame
|
57 |
+
has subframes of "Arrest", "Arraignment", "Trial", and
|
58 |
+
"Sentencing".
|
59 |
+
|
60 |
+
- Perspective_on: The child frame provides a particular
|
61 |
+
perspective on an un-perspectivized parent frame. A pair of
|
62 |
+
examples consists of the "Hiring" and "Get_a_job" frames,
|
63 |
+
which perspectivize the "Employment_start" frame from the
|
64 |
+
Employer's and the Employee's point of view, respectively.
|
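These relations can also be queried programmatically. A sketch using the
reader's `frame_relations()` function (assuming it accepts a frame name; the
expected output mirrors the `frameRelations` attribute shown below):

| >>> from nltk.corpus import framenet as fn
| >>> fn.frame_relations('Arrest')
| [<Parent=Intentionally_affect -- Inheritance -> Child=Arrest>, ...]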
65 |
+
|
66 |
+
To get a list of all of the Frames in FrameNet, you can use the
|
67 |
+
`frames()` function. If you supply a regular expression pattern to the
|
68 |
+
`frames()` function, you will get a list of all Frames whose names match
|
69 |
+
that pattern:
|
70 |
+
|
71 |
+
>>> from pprint import pprint
|
72 |
+
>>> from operator import itemgetter
|
73 |
+
>>> from nltk.corpus import framenet as fn
|
74 |
+
>>> from nltk.corpus.reader.framenet import PrettyList
|
75 |
+
>>> x = fn.frames(r'(?i)crim')
|
76 |
+
>>> x.sort(key=itemgetter('ID'))
|
77 |
+
>>> x
|
78 |
+
[<frame ID=200 name=Criminal_process>, <frame ID=500 name=Criminal_investigation>, ...]
|
79 |
+
>>> PrettyList(sorted(x, key=itemgetter('ID')))
|
80 |
+
[<frame ID=200 name=Criminal_process>, <frame ID=500 name=Criminal_investigation>, ...]
|
81 |
+
|
82 |
+
To get the details of a particular Frame, you can use the `frame()`
|
83 |
+
function passing in the frame number:
|
84 |
+
|
85 |
+
>>> from pprint import pprint
|
86 |
+
>>> from nltk.corpus import framenet as fn
|
87 |
+
>>> f = fn.frame(202)
|
88 |
+
>>> f.ID
|
89 |
+
202
|
90 |
+
>>> f.name
|
91 |
+
'Arrest'
|
92 |
+
>>> f.definition
|
93 |
+
"Authorities charge a Suspect, who is under suspicion of having committed a crime..."
|
94 |
+
>>> len(f.lexUnit)
|
95 |
+
11
|
96 |
+
>>> pprint(sorted([x for x in f.FE]))
|
97 |
+
['Authorities',
|
98 |
+
'Charges',
|
99 |
+
'Co-participant',
|
100 |
+
'Manner',
|
101 |
+
'Means',
|
102 |
+
'Offense',
|
103 |
+
'Place',
|
104 |
+
'Purpose',
|
105 |
+
'Source_of_legal_authority',
|
106 |
+
'Suspect',
|
107 |
+
'Time',
|
108 |
+
'Type']
|
109 |
+
>>> pprint(f.frameRelations)
|
110 |
+
[<Parent=Intentionally_affect -- Inheritance -> Child=Arrest>, <Complex=Criminal_process -- Subframe -> Component=Arrest>, ...]
|
111 |
+
|
112 |
+
The `frame()` function shown above returns a dict object containing
|
113 |
+
detailed information about the Frame. See the documentation on the
|
114 |
+
`frame()` function for the specifics.
|
115 |
+
|
116 |
+
You can also search for Frames by their Lexical Units (LUs). The
|
117 |
+
`frames_by_lemma()` function returns a list of all frames that contain
|
118 |
+
LUs in which the 'name' attribute of the LU matches the given regular
|
119 |
+
expression. Note that LU names are composed of "lemma.POS", where the
|
120 |
+
"lemma" part can be made up of either a single lexeme (e.g. 'run') or
|
121 |
+
multiple lexemes (e.g. 'a little') (see below).
|
122 |
+
|
123 |
+
>>> PrettyList(sorted(fn.frames_by_lemma(r'(?i)a little'), key=itemgetter('ID')))
|
124 |
+
[<frame ID=189 name=Quanti...>, <frame ID=2001 name=Degree>]
|
125 |
+
|
126 |
+
-------------
|
127 |
+
Lexical Units
|
128 |
+
-------------
|
129 |
+
|
130 |
+
A lexical unit (LU) is a pairing of a word with a meaning. For
|
131 |
+
example, the "Apply_heat" Frame describes a common situation
|
132 |
+
involving a Cook, some Food, and a Heating Instrument, and is
|
133 |
+
_evoked_ by words such as bake, blanch, boil, broil, brown,
|
134 |
+
simmer, steam, etc. These frame-evoking words are the LUs in the
|
135 |
+
Apply_heat frame. Each sense of a polysemous word is a different
|
136 |
+
LU.
|
137 |
+
|
138 |
+
We have used the word "word" in talking about LUs. The reality
|
139 |
+
is actually rather complex. When we say that the word "bake" is
|
140 |
+
polysemous, we mean that the lemma "bake.v" (which has the
|
141 |
+
word-forms "bake", "bakes", "baked", and "baking") is linked to
|
142 |
+
three different frames:
|
143 |
+
|
144 |
+
- Apply_heat: "Michelle baked the potatoes for 45 minutes."
|
145 |
+
|
146 |
+
- Cooking_creation: "Michelle baked her mother a cake for her birthday."
|
147 |
+
|
148 |
+
- Absorb_heat: "The potatoes have to bake for more than 30 minutes."
|
149 |
+
|
150 |
+
These constitute three different LUs, with different
|
151 |
+
definitions.
|
152 |
+
|
153 |
+
Multiword expressions such as "given name" and hyphenated words
|
154 |
+
like "shut-eye" can also be LUs. Idiomatic phrases such as
|
155 |
+
"middle of nowhere" and "give the slip (to)" are also defined as
|
156 |
+
LUs in the appropriate frames ("Isolated_places" and "Evading",
|
157 |
+
respectively), and their internal structure is not analyzed.
|
158 |
+
|
159 |
+
FrameNet provides multiple annotated examples of each sense of a
|
160 |
+
word (i.e. each LU). Moreover, the set of examples
|
161 |
+
(approximately 20 per LU) illustrates all of the combinatorial
|
162 |
+
possibilities of the lexical unit.
|
163 |
+
|
164 |
+
Each LU is linked to a Frame, and hence to the other words which
|
165 |
+
evoke that Frame. This makes the FrameNet database similar to a
|
166 |
+
thesaurus, grouping together semantically similar words.
|
167 |
+
|
168 |
+
In the simplest case, frame-evoking words are verbs such as
|
169 |
+
"fried" in:
|
170 |
+
|
171 |
+
"Matilde fried the catfish in a heavy iron skillet."
|
172 |
+
|
173 |
+
Sometimes event nouns may evoke a Frame. For example,
|
174 |
+
"reduction" evokes "Cause_change_of_scalar_position" in:
|
175 |
+
|
176 |
+
"...the reduction of debt levels to $665 million from $2.6 billion."
|
177 |
+
|
178 |
+
Adjectives may also evoke a Frame. For example, "asleep" may
|
179 |
+
evoke the "Sleep" frame as in:
|
180 |
+
|
181 |
+
"They were asleep for hours."
|
182 |
+
|
183 |
+
Many common nouns, such as artifacts like "hat" or "tower",
|
184 |
+
typically serve as dependents rather than clearly evoking their
|
185 |
+
own frames.
|
186 |
+
|
187 |
+
Details for a specific lexical unit can be obtained using the
|
188 |
+
`lus()` function, which takes an optional regular expression
|
189 |
+
pattern that will be matched against the name of the lexical unit:
|
190 |
+
|
191 |
+
>>> from pprint import pprint
|
192 |
+
>>> PrettyList(sorted(fn.lus(r'(?i)a little'), key=itemgetter('ID')))
|
193 |
+
[<lu ID=14733 name=a little.n>, <lu ID=14743 name=a little.adv>, ...]
|
194 |
+
|
195 |
+
You can obtain detailed information on a particular LU by calling the
|
196 |
+
`lu()` function and passing in an LU's 'ID' number:
|
197 |
+
|
198 |
+
>>> from pprint import pprint
|
199 |
+
>>> from nltk.corpus import framenet as fn
|
200 |
+
>>> fn.lu(256).name
|
201 |
+
'foresee.v'
|
202 |
+
>>> fn.lu(256).definition
|
203 |
+
'COD: be aware of beforehand; predict.'
|
204 |
+
>>> fn.lu(256).frame.name
|
205 |
+
'Expectation'
|
206 |
+
>>> fn.lu(256).lexemes[0].name
|
207 |
+
'foresee'
|
208 |
+
|
209 |
+
Note that LU names take the form of a dotted string (e.g. "run.v" or "a
|
210 |
+
little.adv") in which a lemma precedes the "." and a part of speech
|
211 |
+
(POS) follows the dot. The lemma may be composed of a single lexeme
|
212 |
+
(e.g. "run") or of multiple lexemes (e.g. "a little"). The list of
|
213 |
+
POSs used in the LUs is:
|
214 |
+
|
215 |
+
v - verb
|
216 |
+
n - noun
|
217 |
+
a - adjective
|
218 |
+
adv - adverb
|
219 |
+
prep - preposition
|
220 |
+
num - numbers
|
221 |
+
intj - interjection
|
222 |
+
art - article
|
223 |
+
c - conjunction
|
224 |
+
scon - subordinating conjunction
|
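Because the POS is encoded as a suffix of the LU name, it can be used to
filter searches. A sketch (the pattern is matched against the full LU name,
as in the `lus()` example above; output illustrative):

| >>> fn.lus(r'(?i)^foresee\.v$')
| [<lu ID=256 name=foresee.v>]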
225 |
+
|
226 |
+
For more detailed information about the info that is contained in the
|
227 |
+
dict that is returned by the `lu()` function, see the documentation on
|
228 |
+
the `lu()` function.
|
229 |
+
|
230 |
+
-------------------
|
231 |
+
Annotated Documents
|
232 |
+
-------------------
|
233 |
+
|
234 |
+
The FrameNet corpus contains a small set of annotated documents. A list
|
235 |
+
of these documents can be obtained by calling the `docs()` function:
|
236 |
+
|
237 |
+
>>> from pprint import pprint
|
238 |
+
>>> from nltk.corpus import framenet as fn
|
239 |
+
>>> d = fn.docs('BellRinging')[0]
|
240 |
+
>>> d.corpname
|
241 |
+
'PropBank'
|
242 |
+
>>> d.sentence[49]
|
243 |
+
full-text sentence (...) in BellRinging:
|
244 |
+
<BLANKLINE>
|
245 |
+
<BLANKLINE>
|
246 |
+
[POS] 17 tags
|
247 |
+
<BLANKLINE>
|
248 |
+
[POS_tagset] PENN
|
249 |
+
<BLANKLINE>
|
250 |
+
[text] + [annotationSet]
|
251 |
+
<BLANKLINE>
|
252 |
+
`` I live in hopes that the ringers themselves will be drawn into
|
253 |
+
***** ******* *****
|
254 |
+
Desir Cause_t Cause
|
255 |
+
[1] [3] [2]
|
256 |
+
<BLANKLINE>
|
257 |
+
that fuller life .
|
258 |
+
******
|
259 |
+
Comple
|
260 |
+
[4]
|
261 |
+
(Desir=Desiring, Cause_t=Cause_to_make_noise, Cause=Cause_motion, Comple=Completeness)
|
262 |
+
<BLANKLINE>
|
263 |
+
|
264 |
+
>>> d.sentence[49].annotationSet[1]
|
265 |
+
annotation set (...):
|
266 |
+
<BLANKLINE>
|
267 |
+
[status] MANUAL
|
268 |
+
<BLANKLINE>
|
269 |
+
[LU] (6605) hope.n in Desiring
|
270 |
+
<BLANKLINE>
|
271 |
+
[frame] (366) Desiring
|
272 |
+
<BLANKLINE>
|
273 |
+
[GF] 2 relations
|
274 |
+
<BLANKLINE>
|
275 |
+
[PT] 2 phrases
|
276 |
+
<BLANKLINE>
|
277 |
+
[text] + [Target] + [FE] + [Noun]
|
278 |
+
<BLANKLINE>
|
279 |
+
`` I live in hopes that the ringers themselves will be drawn into
|
280 |
+
- ^^^^ ^^ ***** ----------------------------------------------
|
281 |
+
E supp su Event
|
282 |
+
<BLANKLINE>
|
283 |
+
that fuller life .
|
284 |
+
-----------------
|
285 |
+
<BLANKLINE>
|
286 |
+
(E=Experiencer, su=supp)
|
287 |
+
<BLANKLINE>
|
288 |
+
<BLANKLINE>
|
venv/lib/python3.10/site-packages/nltk/test/generate.doctest
ADDED
@@ -0,0 +1,78 @@
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
===============================================
|
5 |
+
Generating sentences from context-free grammars
|
6 |
+
===============================================
|
7 |
+
|
8 |
+
An example grammar:
|
9 |
+
|
10 |
+
>>> from nltk.parse.generate import generate, demo_grammar
|
11 |
+
>>> from nltk import CFG
|
12 |
+
>>> grammar = CFG.fromstring(demo_grammar)
|
13 |
+
>>> print(grammar)
|
14 |
+
Grammar with 13 productions (start state = S)
|
15 |
+
S -> NP VP
|
16 |
+
NP -> Det N
|
17 |
+
PP -> P NP
|
18 |
+
VP -> 'slept'
|
19 |
+
VP -> 'saw' NP
|
20 |
+
VP -> 'walked' PP
|
21 |
+
Det -> 'the'
|
22 |
+
Det -> 'a'
|
23 |
+
N -> 'man'
|
24 |
+
N -> 'park'
|
25 |
+
N -> 'dog'
|
26 |
+
P -> 'in'
|
27 |
+
P -> 'with'
|
28 |
+
|
29 |
+
The first 10 generated sentences:
|
30 |
+
|
31 |
+
>>> for sentence in generate(grammar, n=10):
|
32 |
+
... print(' '.join(sentence))
|
33 |
+
the man slept
|
34 |
+
the man saw the man
|
35 |
+
the man saw the park
|
36 |
+
the man saw the dog
|
37 |
+
the man saw a man
|
38 |
+
the man saw a park
|
39 |
+
the man saw a dog
|
40 |
+
the man walked in the man
|
41 |
+
the man walked in the park
|
42 |
+
the man walked in the dog
|
43 |
+
|
44 |
+
All sentences of max depth 4:
|
45 |
+
|
46 |
+
>>> for sentence in generate(grammar, depth=4):
|
47 |
+
... print(' '.join(sentence))
|
48 |
+
the man slept
|
49 |
+
the park slept
|
50 |
+
the dog slept
|
51 |
+
a man slept
|
52 |
+
a park slept
|
53 |
+
a dog slept
|
54 |
+
|
55 |
+
The number of sentences at different max depths (depth 3 yields nothing, because every sentence contains an NP, whose terminals only appear at depth 4):
|
56 |
+
|
57 |
+
>>> len(list(generate(grammar, depth=3)))
|
58 |
+
0
|
59 |
+
>>> len(list(generate(grammar, depth=4)))
|
60 |
+
6
|
61 |
+
>>> len(list(generate(grammar, depth=5)))
|
62 |
+
42
|
63 |
+
>>> len(list(generate(grammar, depth=6)))
|
64 |
+
114
|
65 |
+
>>> len(list(generate(grammar)))
|
66 |
+
114
|
67 |
+
|
68 |
+
Infinite grammars will raise a ``RuntimeError`` when not bounded by some ``depth``:
|
69 |
+
|
70 |
+
>>> grammar = CFG.fromstring("""
|
71 |
+
... S -> A B
|
72 |
+
... A -> B
|
73 |
+
... B -> "b" | A
|
74 |
+
... """)
|
75 |
+
>>> list(generate(grammar))
|
76 |
+
Traceback (most recent call last):
|
77 |
+
...
|
78 |
+
RuntimeError: The grammar has rule(s) that yield infinite recursion!
|
venv/lib/python3.10/site-packages/nltk/test/gensim.doctest
ADDED
@@ -0,0 +1,141 @@
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
=======================================
|
5 |
+
Demonstrate word embedding using Gensim
|
6 |
+
=======================================
|
7 |
+
|
8 |
+
>>> from nltk.test.gensim_fixt import setup_module
|
9 |
+
>>> setup_module()
|
10 |
+
|
11 |
+
We demonstrate three functions:
|
12 |
+
- Train word embeddings using the Brown Corpus;
|
13 |
+
- Load the pre-trained model and perform simple tasks; and
|
14 |
+
- Prune the pre-trained binary model.
|
15 |
+
|
16 |
+
>>> import gensim
|
17 |
+
|
18 |
+
---------------
|
19 |
+
Train the model
|
20 |
+
---------------
|
21 |
+
|
22 |
+
Here we train a word embedding using the Brown Corpus:
|
23 |
+
|
24 |
+
>>> from nltk.corpus import brown
|
25 |
+
>>> train_set = brown.sents()[:10000]
|
26 |
+
>>> model = gensim.models.Word2Vec(train_set)
|
27 |
+
|
28 |
+
Training the model might take some time, so once it is trained it can be saved and reloaded as follows:
|
29 |
+
|
30 |
+
>>> model.save('brown.embedding')
|
31 |
+
>>> new_model = gensim.models.Word2Vec.load('brown.embedding')
|
32 |
+
|
33 |
+
The model maps each word in its vocabulary to an embedding vector, so we can easily look up the representation of a word.
|
34 |
+
|
35 |
+
>>> len(new_model.wv['university'])
|
36 |
+
100
|
37 |
+
|
38 |
+
Gensim already implements several supporting functions for manipulating word embeddings.
|
39 |
+
For example, to compute the cosine similarity between 2 words:
|
40 |
+
|
41 |
+
>>> new_model.wv.similarity('university','school') > 0.3
|
42 |
+
True
|
43 |
+
|
44 |
+
---------------------------
|
45 |
+
Using the pre-trained model
|
46 |
+
---------------------------
|
47 |
+
|
48 |
+
NLTK includes a sample of a pre-trained model that was trained on 100 billion words from the Google News dataset.
|
49 |
+
The full model is from https://code.google.com/p/word2vec/ (about 3 GB).
|
50 |
+
|
51 |
+
>>> from nltk.data import find
|
52 |
+
>>> word2vec_sample = str(find('models/word2vec_sample/pruned.word2vec.txt'))
|
53 |
+
>>> model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_sample, binary=False)
|
54 |
+
|
55 |
+
We pruned the model to only include the most common words (~44k words).
|
56 |
+
|
57 |
+
>>> len(model)
|
58 |
+
43981
|
59 |
+
|
60 |
+
Each word is represented in the space of 300 dimensions:
|
61 |
+
|
62 |
+
>>> len(model['university'])
|
63 |
+
300
|
64 |
+
|
65 |
+
Finding the top n words most similar to a target word is simple. The result is a list of n (word, similarity score) pairs.
|
66 |
+
|
67 |
+
>>> model.most_similar(positive=['university'], topn = 3)
|
68 |
+
[('universities', 0.70039...), ('faculty', 0.67809...), ('undergraduate', 0.65870...)]
|
69 |
+
|
70 |
+
Finding the word that doesn't belong in a list is also supported, although it would be simple to implement yourself.
|
71 |
+
|
72 |
+
>>> model.doesnt_match('breakfast cereal dinner lunch'.split())
|
73 |
+
'cereal'
|
74 |
+
|
75 |
+
Mikolov et al. (2013) showed that word embeddings capture many syntactic and semantic regularities. For example,
|
76 |
+
the vector 'King - Man + Woman' is close to 'Queen' and 'Germany - Berlin + Paris' is close to 'France'.
|
77 |
+
|
78 |
+
>>> model.most_similar(positive=['woman','king'], negative=['man'], topn = 1)
|
79 |
+
[('queen', 0.71181...)]
|
80 |
+
|
81 |
+
>>> model.most_similar(positive=['Paris','Germany'], negative=['Berlin'], topn = 1)
|
82 |
+
[('France', 0.78840...)]
|
83 |
+
|
84 |
+
We can visualize the word embeddings using t-SNE (https://lvdmaaten.github.io/tsne/). For this demonstration, we visualize the first 1000 words.
|
85 |
+
|
86 |
+
| import numpy as np
|
87 |
+
| labels = []
|
88 |
+
| count = 0
|
89 |
+
| max_count = 1000
|
90 |
+
| X = np.zeros(shape=(max_count,len(model['university'])))
|
91 |
+
|
|
92 |
+
| for term in model.index_to_key:
|
93 |
+
| X[count] = model[term]
|
94 |
+
| labels.append(term)
|
95 |
+
| count+= 1
|
96 |
+
| if count >= max_count: break
|
97 |
+
|
|
98 |
+
| # It is recommended to use PCA first to reduce to ~50 dimensions
|
99 |
+
| from sklearn.decomposition import PCA
|
100 |
+
| pca = PCA(n_components=50)
|
101 |
+
| X_50 = pca.fit_transform(X)
|
102 |
+
|
|
103 |
+
| # Using TSNE to further reduce to 2 dimensions
|
104 |
+
| from sklearn.manifold import TSNE
|
105 |
+
| model_tsne = TSNE(n_components=2, random_state=0)
|
106 |
+
| Y = model_tsne.fit_transform(X_50)
|
107 |
+
|
|
108 |
+
| # Show the scatter plot
|
109 |
+
| import matplotlib.pyplot as plt
|
110 |
+
| plt.scatter(Y[:,0], Y[:,1], 20)
|
111 |
+
|
|
112 |
+
| # Add labels
|
113 |
+
| for label, x, y in zip(labels, Y[:, 0], Y[:, 1]):
|
114 |
+
| plt.annotate(label, xy = (x,y), xytext = (0, 0), textcoords = 'offset points', size = 10)
|
115 |
+
|
|
116 |
+
| plt.show()
|
117 |
+
|
118 |
+
------------------------------
|
119 |
+
Prune the trained binary model
|
120 |
+
------------------------------
|
121 |
+
|
122 |
+
Here is the supporting code to extract part of the binary model (GoogleNews-vectors-negative300.bin.gz) from https://code.google.com/p/word2vec/
|
123 |
+
We use this code to get the `word2vec_sample` model.
|
124 |
+
|
125 |
+
| import gensim
|
126 |
+
| # Load the binary model
|
127 |
+
| model = gensim.models.KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary = True)
|
128 |
+
|
|
129 |
+
| # Only output word that appear in the Brown corpus
|
130 |
+
| from nltk.corpus import brown
|
131 |
+
| words = set(brown.words())
|
132 |
+
| print(len(words))
|
133 |
+
|
|
134 |
+
| # Output presented word to a temporary file
|
135 |
+
| out_file = 'pruned.word2vec.txt'
|
136 |
+
| with open(out_file,'w') as f:
|
137 |
+
| word_presented = words.intersection(model.index_to_key)
|
138 |
+
| f.write('{} {}\n'.format(len(word_presented),len(model['word'])))
|
139 |
+
|
|
140 |
+
| for word in word_presented:
|
141 |
+
| f.write('{} {}\n'.format(word, ' '.join(str(value) for value in model[word])))
|
venv/lib/python3.10/site-packages/nltk/test/gensim_fixt.py
ADDED
@@ -0,0 +1,4 @@
def setup_module():
    import pytest

    pytest.importorskip("gensim")
venv/lib/python3.10/site-packages/nltk/test/gluesemantics.doctest
ADDED
@@ -0,0 +1,383 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

==============================================================================
Glue Semantics
==============================================================================


======================
Linear logic
======================

>>> from nltk.sem import logic
>>> from nltk.sem.glue import *
>>> from nltk.sem.linearlogic import *

>>> from nltk.sem.linearlogic import Expression
>>> read_expr = Expression.fromstring

Parser

>>> print(read_expr(r'f'))
f
>>> print(read_expr(r'(g -o f)'))
(g -o f)
>>> print(read_expr(r'(g -o (h -o f))'))
(g -o (h -o f))
>>> print(read_expr(r'((g -o G) -o G)'))
((g -o G) -o G)
>>> print(read_expr(r'(g -o f)(g)'))
(g -o f)(g)
>>> print(read_expr(r'((g -o G) -o G)((g -o f))'))
((g -o G) -o G)((g -o f))

Simplify

>>> print(read_expr(r'f').simplify())
f
>>> print(read_expr(r'(g -o f)').simplify())
(g -o f)
>>> print(read_expr(r'((g -o G) -o G)').simplify())
((g -o G) -o G)
>>> print(read_expr(r'(g -o f)(g)').simplify())
f
>>> try: read_expr(r'(g -o f)(f)').simplify()
... except LinearLogicApplicationException as e: print(e)
...
Cannot apply (g -o f) to f. Cannot unify g with f given {}
>>> print(read_expr(r'(G -o f)(g)').simplify())
f
>>> print(read_expr(r'((g -o G) -o G)((g -o f))').simplify())
f

Test BindingDict

>>> h = ConstantExpression('h')
>>> g = ConstantExpression('g')
>>> f = ConstantExpression('f')

>>> H = VariableExpression('H')
>>> G = VariableExpression('G')
>>> F = VariableExpression('F')

>>> d1 = BindingDict({H: h})
>>> d2 = BindingDict({F: f, G: F})
>>> d12 = d1 + d2
>>> all12 = ['%s: %s' % (v, d12[v]) for v in d12.d]
>>> all12.sort()
>>> print(all12)
['F: f', 'G: f', 'H: h']

>>> BindingDict([(F,f),(G,g),(H,h)]) == BindingDict({F:f, G:g, H:h})
True

>>> d4 = BindingDict({F: f})
>>> try: d4[F] = g
... except VariableBindingException as e: print(e)
Variable F already bound to another value

Test Unify

>>> try: f.unify(g, BindingDict())
... except UnificationException as e: print(e)
...
Cannot unify f with g given {}

>>> f.unify(G, BindingDict()) == BindingDict({G: f})
True
>>> try: f.unify(G, BindingDict({G: h}))
... except UnificationException as e: print(e)
...
Cannot unify f with G given {G: h}
>>> f.unify(G, BindingDict({G: f})) == BindingDict({G: f})
True
>>> f.unify(G, BindingDict({H: f})) == BindingDict({G: f, H: f})
True

>>> G.unify(f, BindingDict()) == BindingDict({G: f})
True
>>> try: G.unify(f, BindingDict({G: h}))
... except UnificationException as e: print(e)
...
Cannot unify G with f given {G: h}
>>> G.unify(f, BindingDict({G: f})) == BindingDict({G: f})
True
>>> G.unify(f, BindingDict({H: f})) == BindingDict({G: f, H: f})
True

>>> G.unify(F, BindingDict()) == BindingDict({G: F})
True
>>> try: G.unify(F, BindingDict({G: H}))
... except UnificationException as e: print(e)
...
Cannot unify G with F given {G: H}
>>> G.unify(F, BindingDict({G: F})) == BindingDict({G: F})
True
>>> G.unify(F, BindingDict({H: F})) == BindingDict({G: F, H: F})
True

Test Compile

>>> print(read_expr('g').compile_pos(Counter(), GlueFormula))
(<ConstantExpression g>, [])
>>> print(read_expr('(g -o f)').compile_pos(Counter(), GlueFormula))
(<ImpExpression (g -o f)>, [])
>>> print(read_expr('(g -o (h -o f))').compile_pos(Counter(), GlueFormula))
(<ImpExpression (g -o (h -o f))>, [])


======================
Glue
======================

Demo of "John walks"
--------------------

>>> john = GlueFormula("John", "g")
>>> print(john)
John : g
>>> walks = GlueFormula(r"\x.walks(x)", "(g -o f)")
>>> print(walks)
\x.walks(x) : (g -o f)
>>> print(walks.applyto(john))
\x.walks(x)(John) : (g -o f)(g)
>>> print(walks.applyto(john).simplify())
walks(John) : f


Demo of "A man walks"
---------------------

>>> a = GlueFormula("\\P Q.some x.(P(x) and Q(x))", "((gv -o gr) -o ((g -o G) -o G))")
>>> print(a)
\P Q.exists x.(P(x) & Q(x)) : ((gv -o gr) -o ((g -o G) -o G))
>>> man = GlueFormula(r"\x.man(x)", "(gv -o gr)")
>>> print(man)
\x.man(x) : (gv -o gr)
>>> walks = GlueFormula(r"\x.walks(x)", "(g -o f)")
>>> print(walks)
\x.walks(x) : (g -o f)
>>> a_man = a.applyto(man)
>>> print(a_man.simplify())
\Q.exists x.(man(x) & Q(x)) : ((g -o G) -o G)
>>> a_man_walks = a_man.applyto(walks)
>>> print(a_man_walks.simplify())
exists x.(man(x) & walks(x)) : f


Demo of 'every girl chases a dog'
---------------------------------

Individual words:

>>> every = GlueFormula("\\P Q.all x.(P(x) -> Q(x))", "((gv -o gr) -o ((g -o G) -o G))")
>>> print(every)
\P Q.all x.(P(x) -> Q(x)) : ((gv -o gr) -o ((g -o G) -o G))
>>> girl = GlueFormula(r"\x.girl(x)", "(gv -o gr)")
>>> print(girl)
\x.girl(x) : (gv -o gr)
>>> chases = GlueFormula(r"\x y.chases(x,y)", "(g -o (h -o f))")
>>> print(chases)
\x y.chases(x,y) : (g -o (h -o f))
>>> a = GlueFormula("\\P Q.some x.(P(x) and Q(x))", "((hv -o hr) -o ((h -o H) -o H))")
>>> print(a)
\P Q.exists x.(P(x) & Q(x)) : ((hv -o hr) -o ((h -o H) -o H))
>>> dog = GlueFormula(r"\x.dog(x)", "(hv -o hr)")
>>> print(dog)
\x.dog(x) : (hv -o hr)

Noun Quantification can only be done one way:

>>> every_girl = every.applyto(girl)
>>> print(every_girl.simplify())
\Q.all x.(girl(x) -> Q(x)) : ((g -o G) -o G)
>>> a_dog = a.applyto(dog)
>>> print(a_dog.simplify())
\Q.exists x.(dog(x) & Q(x)) : ((h -o H) -o H)

The first reading is achieved by combining 'chases' with 'a dog' first.
Since 'a dog' requires something of the form '(h -o H)' we must
get rid of the 'g' in the glue of 'chases'. We will do this with
the '-o elimination' rule. So, x1 will be our subject placeholder.

>>> xPrime = GlueFormula("x1", "g")
>>> print(xPrime)
x1 : g
>>> xPrime_chases = chases.applyto(xPrime)
>>> print(xPrime_chases.simplify())
\y.chases(x1,y) : (h -o f)
>>> xPrime_chases_a_dog = a_dog.applyto(xPrime_chases)
>>> print(xPrime_chases_a_dog.simplify())
exists x.(dog(x) & chases(x1,x)) : f

Now we can retract our subject placeholder using lambda-abstraction and
combine with the true subject.

>>> chases_a_dog = xPrime_chases_a_dog.lambda_abstract(xPrime)
>>> print(chases_a_dog.simplify())
\x1.exists x.(dog(x) & chases(x1,x)) : (g -o f)
>>> every_girl_chases_a_dog = every_girl.applyto(chases_a_dog)
>>> r1 = every_girl_chases_a_dog.simplify()
>>> r2 = GlueFormula(r'all x.(girl(x) -> exists z1.(dog(z1) & chases(x,z1)))', 'f')
>>> r1 == r2
True

The second reading is achieved by combining 'every girl' with 'chases' first.

>>> xPrime = GlueFormula("x1", "g")
>>> print(xPrime)
x1 : g
>>> xPrime_chases = chases.applyto(xPrime)
>>> print(xPrime_chases.simplify())
\y.chases(x1,y) : (h -o f)
>>> yPrime = GlueFormula("x2", "h")
>>> print(yPrime)
x2 : h
>>> xPrime_chases_yPrime = xPrime_chases.applyto(yPrime)
>>> print(xPrime_chases_yPrime.simplify())
chases(x1,x2) : f
>>> chases_yPrime = xPrime_chases_yPrime.lambda_abstract(xPrime)
>>> print(chases_yPrime.simplify())
\x1.chases(x1,x2) : (g -o f)
>>> every_girl_chases_yPrime = every_girl.applyto(chases_yPrime)
>>> print(every_girl_chases_yPrime.simplify())
all x.(girl(x) -> chases(x,x2)) : f
>>> every_girl_chases = every_girl_chases_yPrime.lambda_abstract(yPrime)
>>> print(every_girl_chases.simplify())
\x2.all x.(girl(x) -> chases(x,x2)) : (h -o f)
>>> every_girl_chases_a_dog = a_dog.applyto(every_girl_chases)
>>> r1 = every_girl_chases_a_dog.simplify()
>>> r2 = GlueFormula(r'exists x.(dog(x) & all z2.(girl(z2) -> chases(z2,x)))', 'f')
>>> r1 == r2
True


Compilation
-----------

>>> for cp in GlueFormula('m', '(b -o a)').compile(Counter()): print(cp)
m : (b -o a) : {1}
>>> for cp in GlueFormula('m', '((c -o b) -o a)').compile(Counter()): print(cp)
v1 : c : {1}
m : (b[1] -o a) : {2}
>>> for cp in GlueFormula('m', '((d -o (c -o b)) -o a)').compile(Counter()): print(cp)
v1 : c : {1}
v2 : d : {2}
m : (b[1, 2] -o a) : {3}
>>> for cp in GlueFormula('m', '((d -o e) -o ((c -o b) -o a))').compile(Counter()): print(cp)
v1 : d : {1}
v2 : c : {2}
m : (e[1] -o (b[2] -o a)) : {3}
>>> for cp in GlueFormula('m', '(((d -o c) -o b) -o a)').compile(Counter()): print(cp)
v1 : (d -o c) : {1}
m : (b[1] -o a) : {2}
>>> for cp in GlueFormula('m', '((((e -o d) -o c) -o b) -o a)').compile(Counter()): print(cp)
v1 : e : {1}
v2 : (d[1] -o c) : {2}
m : (b[2] -o a) : {3}


Demo of 'a man walks' using Compilation
---------------------------------------

Premises

>>> a = GlueFormula('\\P Q.some x.(P(x) and Q(x))', '((gv -o gr) -o ((g -o G) -o G))')
>>> print(a)
\P Q.exists x.(P(x) & Q(x)) : ((gv -o gr) -o ((g -o G) -o G))

>>> man = GlueFormula('\\x.man(x)', '(gv -o gr)')
>>> print(man)
\x.man(x) : (gv -o gr)

>>> walks = GlueFormula('\\x.walks(x)', '(g -o f)')
>>> print(walks)
\x.walks(x) : (g -o f)

Compiled Premises:

>>> counter = Counter()
>>> ahc = a.compile(counter)
>>> g1 = ahc[0]
>>> print(g1)
v1 : gv : {1}
>>> g2 = ahc[1]
>>> print(g2)
v2 : g : {2}
>>> g3 = ahc[2]
>>> print(g3)
\P Q.exists x.(P(x) & Q(x)) : (gr[1] -o (G[2] -o G)) : {3}
>>> g4 = man.compile(counter)[0]
>>> print(g4)
\x.man(x) : (gv -o gr) : {4}
>>> g5 = walks.compile(counter)[0]
>>> print(g5)
\x.walks(x) : (g -o f) : {5}

Derivation:

>>> g14 = g4.applyto(g1)
>>> print(g14.simplify())
man(v1) : gr : {1, 4}
>>> g134 = g3.applyto(g14)
>>> print(g134.simplify())
\Q.exists x.(man(x) & Q(x)) : (G[2] -o G) : {1, 3, 4}
>>> g25 = g5.applyto(g2)
>>> print(g25.simplify())
walks(v2) : f : {2, 5}
>>> g12345 = g134.applyto(g25)
>>> print(g12345.simplify())
exists x.(man(x) & walks(x)) : f : {1, 2, 3, 4, 5}

---------------------------------
Dependency Graph to Glue Formulas
---------------------------------
>>> from nltk.corpus.reader.dependency import DependencyGraph

>>> depgraph = DependencyGraph("""1 John _ NNP NNP _ 2 SUBJ _ _
... 2 sees _ VB VB _ 0 ROOT _ _
... 3 a _ ex_quant ex_quant _ 4 SPEC _ _
... 4 dog _ NN NN _ 2 OBJ _ _
... """)
>>> gfl = GlueDict('nltk:grammars/sample_grammars/glue.semtype').to_glueformula_list(depgraph)
>>> print(gfl) # doctest: +SKIP
[\x y.sees(x,y) : (f -o (i -o g)),
 \x.dog(x) : (iv -o ir),
 \P Q.exists x.(P(x) & Q(x)) : ((iv -o ir) -o ((i -o I3) -o I3)),
 \P Q.exists x.(P(x) & Q(x)) : ((fv -o fr) -o ((f -o F4) -o F4)),
 \x.John(x) : (fv -o fr)]
>>> glue = Glue()
>>> for r in sorted([r.simplify().normalize() for r in glue.get_readings(glue.gfl_to_compiled(gfl))], key=str):
...     print(r)
exists z1.(John(z1) & exists z2.(dog(z2) & sees(z1,z2)))
exists z1.(dog(z1) & exists z2.(John(z2) & sees(z2,z1)))

-----------------------------------
Dependency Graph to LFG f-structure
-----------------------------------
>>> from nltk.sem.lfg import FStructure

>>> fstruct = FStructure.read_depgraph(depgraph)

>>> print(fstruct) # doctest: +SKIP
f:[pred 'sees'
   obj h:[pred 'dog'
          spec 'a']
   subj g:[pred 'John']]

>>> fstruct.to_depgraph().tree().pprint()
(sees (dog a) John)

---------------------------------
LFG f-structure to Glue
---------------------------------
>>> fstruct.to_glueformula_list(GlueDict('nltk:grammars/sample_grammars/glue.semtype')) # doctest: +SKIP
[\x y.sees(x,y) : (i -o (g -o f)),
 \x.dog(x) : (gv -o gr),
 \P Q.exists x.(P(x) & Q(x)) : ((gv -o gr) -o ((g -o G3) -o G3)),
 \P Q.exists x.(P(x) & Q(x)) : ((iv -o ir) -o ((i -o I4) -o I4)),
 \x.John(x) : (iv -o ir)]

.. see gluesemantics_malt.doctest for more
venv/lib/python3.10/site-packages/nltk/test/gluesemantics_malt.doctest
ADDED
@@ -0,0 +1,69 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

.. see also: gluesemantics.doctest

==============================================================================
Glue Semantics
==============================================================================

>>> from nltk.test.gluesemantics_malt_fixt import setup_module
>>> setup_module()

>>> from nltk.sem.glue import *
>>> nltk.sem.logic._counter._value = 0

--------------------------------
Initialize the Dependency Parser
--------------------------------
>>> from nltk.parse.malt import MaltParser

>>> tagger = RegexpTagger(
...     [('^(John|Mary)$', 'NNP'),
...      ('^(sees|chases)$', 'VB'),
...      ('^(a)$', 'ex_quant'),
...      ('^(every)$', 'univ_quant'),
...      ('^(girl|dog)$', 'NN')
... ]).tag
>>> depparser = MaltParser(tagger=tagger)

--------------------
Automated Derivation
--------------------
>>> glue = Glue(depparser=depparser)
>>> readings = glue.parse_to_meaning('every girl chases a dog'.split())
>>> for reading in sorted([r.simplify().normalize() for r in readings], key=str):
...     print(reading.normalize())
all z1.(girl(z1) -> exists z2.(dog(z2) & chases(z1,z2)))
exists z1.(dog(z1) & all z2.(girl(z2) -> chases(z2,z1)))

>>> drtglue = DrtGlue(depparser=depparser)
>>> readings = drtglue.parse_to_meaning('every girl chases a dog'.split())
>>> for reading in sorted([r.simplify().normalize() for r in readings], key=str):
...     print(reading)
([],[(([z1],[girl(z1)]) -> ([z2],[dog(z2), chases(z1,z2)]))])
([z1],[dog(z1), (([z2],[girl(z2)]) -> ([],[chases(z2,z1)]))])

--------------
With inference
--------------

Checking for equality of two DRSs is very useful when generating readings of a sentence.
For example, the ``glue`` module generates two readings for the sentence
*John sees Mary*:

>>> from nltk.sem.glue import DrtGlue
>>> readings = drtglue.parse_to_meaning('John sees Mary'.split())
>>> for drs in sorted([r.simplify().normalize() for r in readings], key=str):
...     print(drs)
([z1,z2],[John(z1), Mary(z2), sees(z1,z2)])
([z1,z2],[Mary(z1), John(z2), sees(z2,z1)])

However, it is easy to tell that these two readings are logically the
same, and therefore one of them is superfluous. We can use the theorem prover
to determine this equivalence, and then delete one of them. A particular
theorem prover may be specified, or the argument may be left off to use the
default.

>>> readings[0].equiv(readings[1])
True
venv/lib/python3.10/site-packages/nltk/test/gluesemantics_malt_fixt.py
ADDED
@@ -0,0 +1,9 @@
def setup_module():
    import pytest

    from nltk.parse.malt import MaltParser

    try:
        depparser = MaltParser()
    except (AssertionError, LookupError) as e:
        pytest.skip("MaltParser is not available")
venv/lib/python3.10/site-packages/nltk/test/grammar.doctest
ADDED
@@ -0,0 +1,69 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

===============
Grammar Parsing
===============

Grammars can be parsed from strings:

>>> from nltk import CFG
>>> grammar = CFG.fromstring("""
... S -> NP VP
... PP -> P NP
... NP -> Det N | NP PP
... VP -> V NP | VP PP
... Det -> 'a' | 'the'
... N -> 'dog' | 'cat'
... V -> 'chased' | 'sat'
... P -> 'on' | 'in'
... """)
>>> grammar
<Grammar with 14 productions>
>>> grammar.start()
S
>>> grammar.productions()
[S -> NP VP, PP -> P NP, NP -> Det N, NP -> NP PP, VP -> V NP, VP -> VP PP,
Det -> 'a', Det -> 'the', N -> 'dog', N -> 'cat', V -> 'chased', V -> 'sat',
P -> 'on', P -> 'in']

Probabilistic CFGs:

>>> from nltk import PCFG
>>> toy_pcfg1 = PCFG.fromstring("""
... S -> NP VP [1.0]
... NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
... Det -> 'the' [0.8] | 'my' [0.2]
... N -> 'man' [0.5] | 'telescope' [0.5]
... VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
... V -> 'ate' [0.35] | 'saw' [0.65]
... PP -> P NP [1.0]
... P -> 'with' [0.61] | 'under' [0.39]
... """)
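
A probabilistic grammar such as ``toy_pcfg1`` can also be handed to one of
NLTK's probabilistic parsers. The following sketch (not part of the original
test suite, hence skipped) uses ``ViterbiParser`` to find the most probable
parse of a sentence covered by the grammar:

>>> from nltk.parse import ViterbiParser  # doctest: +SKIP
>>> viterbi = ViterbiParser(toy_pcfg1)  # doctest: +SKIP
>>> for tree in viterbi.parse('I saw the man with my telescope'.split()):  # doctest: +SKIP
...     print(tree)
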
Chomsky Normal Form grammar (Test for bug 474)

>>> g = CFG.fromstring("VP^<TOP> -> VBP NP^<VP-TOP>")
>>> g.productions()[0].lhs()
VP^<TOP>

Grammars can contain both empty strings and empty productions:

>>> from nltk.grammar import CFG
>>> from nltk.parse.generate import generate
>>> grammar = CFG.fromstring("""
... S -> A B
... A -> 'a'
... # An empty string:
... B -> 'b' | ''
... """)
>>> list(generate(grammar))
[['a', 'b'], ['a', '']]
>>> grammar = CFG.fromstring("""
... S -> A B
... A -> 'a'
... # An empty production:
... B -> 'b' |
... """)
>>> list(generate(grammar))
[['a', 'b'], ['a']]
venv/lib/python3.10/site-packages/nltk/test/grammartestsuites.doctest
ADDED
@@ -0,0 +1,109 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

==========================
 Test Suites for Grammars
==========================

Sentences in the test suite are divided into two classes:

- grammatical (*accept*) and
- ungrammatical (*reject*).

If a sentence should parse according to the grammar, the value of
``trees`` will be a non-empty list. If a sentence should be rejected
according to the grammar, then the value of ``trees`` will be ``None``.

>>> from nltk.parse import TestGrammar
>>> germantest1 = {}
>>> germantest1['doc'] = "Tests for person agreement"
>>> germantest1['accept'] = [
...     'ich komme',
...     'ich sehe mich',
...     'du kommst',
...     'du siehst mich',
...     'sie kommt',
...     'sie sieht mich',
...     'ihr kommt',
...     'wir kommen',
...     'sie kommen',
...     'du magst mich',
...     'er mag mich',
...     'du folgst mir',
...     'sie hilft mir',
...     ]
>>> germantest1['reject'] = [
...     'ich kommt',
...     'ich kommst',
...     'ich siehst mich',
...     'du komme',
...     'du sehe mich',
...     'du kommt',
...     'er komme',
...     'er siehst mich',
...     'wir komme',
...     'wir kommst',
...     'die Katzen kommst',
...     'sie komme',
...     'sie kommst',
...     'du mag mich',
...     'er magst mich',
...     'du folgt mir',
...     'sie hilfst mir',
...     ]
>>> germantest2 = {}
>>> germantest2['doc'] = "Tests for number agreement"
>>> germantest2['accept'] = [
...     'der Hund kommt',
...     'die Hunde kommen',
...     'ich komme',
...     'wir kommen',
...     'ich sehe die Katzen',
...     'ich folge den Katzen',
...     'ich sehe die Katzen',
...     'ich folge den Katzen',
...     'wir sehen die Katzen',
...     'wir folgen den Katzen'
...     ]
>>> germantest2['reject'] = [
...     'ich kommen',
...     'wir komme',
...     'der Hunde kommt',
...     'der Hunde kommen',
...     'die Katzen kommt',
...     'ich sehe der Hunde',
...     'ich folge den Hund',
...     'ich sehen der Hunde',
...     'ich folgen den Hund',
...     'wir sehe die Katzen',
...     'wir folge den Katzen'
...     ]
>>> germantest3 = {}
>>> germantest3['doc'] = "Tests for case government and subcategorization"
>>> germantest3['accept'] = [
...     'der Hund sieht mich',
...     'der Hund kommt',
...     'ich sehe den Hund',
...     'ich helfe dem Hund',
...     ]
>>> germantest3['reject'] = [
...     'ich sehe',
...     'ich helfe',
...     'ich komme den Hund',
...     'ich sehe den Hund die Katzen',
...     'du hilfst mich',
...     'du siehst mir',
...     'du siehst ich',
...     'der Hunde kommt mich',
...     'die Hunde sehe die Hunde',
...     'der Hund sehe die Hunde',
...     'ich hilft den Hund',
...     'ich hilft der Hund',
...     'ich sehe dem Hund',
...     ]
>>> germantestsuites = [germantest1, germantest2, germantest3]
>>> tester = TestGrammar('grammars/book_grammars/german.fcfg', germantestsuites)
>>> tester.run()
Tests for person agreement: All tests passed!
Tests for number agreement: All tests passed!
Tests for case government and subcategorization: All tests passed!
venv/lib/python3.10/site-packages/nltk/test/index.doctest
ADDED
@@ -0,0 +1,100 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

.. _align howto: align.html
.. _ccg howto: ccg.html
.. _chat80 howto: chat80.html
.. _childes howto: childes.html
.. _chunk howto: chunk.html
.. _classify howto: classify.html
.. _collocations howto: collocations.html
.. _compat howto: compat.html
.. _corpus howto: corpus.html
.. _data howto: data.html
.. _dependency howto: dependency.html
.. _discourse howto: discourse.html
.. _drt howto: drt.html
.. _featgram howto: featgram.html
.. _featstruct howto: featstruct.html
.. _framenet howto: framenet.html
.. _generate howto: generate.html
.. _gluesemantics howto: gluesemantics.html
.. _gluesemantics_malt howto: gluesemantics_malt.html
.. _grammar howto: grammar.html
.. _grammartestsuites howto: grammartestsuites.html
.. _index howto: index.html
.. _inference howto: inference.html
.. _internals howto: internals.html
.. _japanese howto: japanese.html
.. _logic howto: logic.html
.. _metrics howto: metrics.html
.. _misc howto: misc.html
.. _nonmonotonic howto: nonmonotonic.html
.. _parse howto: parse.html
.. _portuguese_en howto: portuguese_en.html
.. _probability howto: probability.html
.. _propbank howto: propbank.html
.. _relextract howto: relextract.html
.. _resolution howto: resolution.html
.. _semantics howto: semantics.html
.. _simple howto: simple.html
.. _stem howto: stem.html
.. _tag howto: tag.html
.. _tokenize howto: tokenize.html
.. _toolbox howto: toolbox.html
.. _tree howto: tree.html
.. _treetransforms howto: treetransforms.html
.. _util howto: util.html
.. _wordnet howto: wordnet.html
.. _wordnet_lch howto: wordnet_lch.html

===========
NLTK HOWTOs
===========

* `align HOWTO`_
* `ccg HOWTO`_
* `chat80 HOWTO`_
* `childes HOWTO`_
* `chunk HOWTO`_
* `classify HOWTO`_
* `collocations HOWTO`_
* `compat HOWTO`_
* `corpus HOWTO`_
* `data HOWTO`_
* `dependency HOWTO`_
* `discourse HOWTO`_
* `drt HOWTO`_
* `featgram HOWTO`_
* `featstruct HOWTO`_
* `framenet HOWTO`_
* `generate HOWTO`_
* `gluesemantics HOWTO`_
* `gluesemantics_malt HOWTO`_
* `grammar HOWTO`_
* `grammartestsuites HOWTO`_
* `index HOWTO`_
* `inference HOWTO`_
* `internals HOWTO`_
* `japanese HOWTO`_
* `logic HOWTO`_
* `metrics HOWTO`_
* `misc HOWTO`_
* `nonmonotonic HOWTO`_
* `parse HOWTO`_
* `portuguese_en HOWTO`_
* `probability HOWTO`_
* `propbank HOWTO`_
* `relextract HOWTO`_
* `resolution HOWTO`_
* `semantics HOWTO`_
* `simple HOWTO`_
* `stem HOWTO`_
* `tag HOWTO`_
* `tokenize HOWTO`_
* `toolbox HOWTO`_
* `tree HOWTO`_
* `treetransforms HOWTO`_
* `util HOWTO`_
* `wordnet HOWTO`_
* `wordnet_lch HOWTO`_
venv/lib/python3.10/site-packages/nltk/test/inference.doctest
ADDED
@@ -0,0 +1,536 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
2 |
+
.. For license information, see LICENSE.TXT
|
3 |
+
|
4 |
+
====================================
|
5 |
+
Logical Inference and Model Building
|
6 |
+
====================================
|
7 |
+
|
8 |
+
>>> from nltk.test.setup_fixt import check_binary
|
9 |
+
>>> check_binary('mace4')
|
10 |
+
|
11 |
+
>>> from nltk import *
|
12 |
+
>>> from nltk.sem.drt import DrtParser
|
13 |
+
>>> from nltk.sem import logic
|
14 |
+
>>> logic._counter._value = 0
|
15 |
+
|
16 |
+
------------
|
17 |
+
Introduction
|
18 |
+
------------
|
19 |
+
|
20 |
+
Within the area of automated reasoning, first order theorem proving
|
21 |
+
and model building (or model generation) have both received much
|
22 |
+
attention, and have given rise to highly sophisticated techniques. We
|
23 |
+
focus therefore on providing an NLTK interface to third party tools
|
24 |
+
for these tasks. In particular, the module ``nltk.inference`` can be
|
25 |
+
used to access both theorem provers and model builders.
|
26 |
+
|
27 |
+
---------------------------------
|
28 |
+
NLTK Interface to Theorem Provers
|
29 |
+
---------------------------------
|
30 |
+
|
31 |
+
The main class used to interface with a theorem prover is the ``Prover``
|
32 |
+
class, found in ``nltk.api``. The ``prove()`` method takes three optional
|
33 |
+
arguments: a goal, a list of assumptions, and a ``verbose`` boolean to
|
34 |
+
indicate whether the proof should be printed to the console. The proof goal
|
35 |
+
and any assumptions need to be instances of the ``Expression`` class
|
36 |
+
specified by ``nltk.sem.logic``. There are currently three theorem provers
|
37 |
+
included with NLTK: ``Prover9``, ``TableauProver``, and
|
38 |
+
``ResolutionProver``. The first is an off-the-shelf prover, while the other
|
39 |
+
two are written in Python and included in the ``nltk.inference`` package.
|
40 |
+
|
41 |
+
>>> from nltk.sem import Expression
|
42 |
+
>>> read_expr = Expression.fromstring
|
43 |
+
>>> p1 = read_expr('man(socrates)')
|
44 |
+
>>> p2 = read_expr('all x.(man(x) -> mortal(x))')
|
45 |
+
>>> c = read_expr('mortal(socrates)')
|
46 |
+
>>> Prover9().prove(c, [p1,p2])
|
47 |
+
True
|
48 |
+
>>> TableauProver().prove(c, [p1,p2])
|
49 |
+
True
|
50 |
+
>>> ResolutionProver().prove(c, [p1,p2], verbose=True)
|
51 |
+
[1] {-mortal(socrates)} A
|
52 |
+
[2] {man(socrates)} A
|
53 |
+
[3] {-man(z2), mortal(z2)} A
|
54 |
+
[4] {-man(socrates)} (1, 3)
|
55 |
+
[5] {mortal(socrates)} (2, 3)
|
56 |
+
[6] {} (1, 5)
|
57 |
+
<BLANKLINE>
|
58 |
+
True
|
59 |
+
|
60 |
+
---------------------
|
61 |
+
The ``ProverCommand``
|
62 |
+
---------------------
|
63 |
+
|
64 |
+
A ``ProverCommand`` is a stateful holder for a theorem
|
65 |
+
prover. The command stores a theorem prover instance (of type ``Prover``),
|
66 |
+
a goal, a list of assumptions, the result of the proof, and a string version
|
67 |
+
of the entire proof. Corresponding to the three included ``Prover``
|
68 |
+
implementations, there are three ``ProverCommand`` implementations:
|
69 |
+
``Prover9Command``, ``TableauProverCommand``, and
|
70 |
+
``ResolutionProverCommand``.
|
71 |
+
|
72 |
+
The ``ProverCommand``'s constructor takes its goal and assumptions. The
|
73 |
+
``prove()`` command executes the ``Prover`` and ``proof()``
|
74 |
+
returns a String form of the proof
|
75 |
+
If the ``prove()`` method has not been called,
|
76 |
+
then the prover command will be unable to display a proof.
|
77 |
+
|
78 |
+
>>> prover = ResolutionProverCommand(c, [p1,p2])
|
79 |
+
>>> print(prover.proof())
|
80 |
+
Traceback (most recent call last):
|
81 |
+
File "...", line 1212, in __run
|
82 |
+
compileflags, 1) in test.globs
|
83 |
+
File "<doctest nltk/test/inference.doctest[10]>", line 1, in <module>
|
84 |
+
File "...", line ..., in proof
|
85 |
+
raise LookupError("You have to call prove() first to get a proof!")
|
86 |
+
LookupError: You have to call prove() first to get a proof!
|
87 |
+
>>> prover.prove()
|
88 |
+
True
|
89 |
+
>>> print(prover.proof())
|
90 |
+
[1] {-mortal(socrates)} A
|
91 |
+
[2] {man(socrates)} A
|
92 |
+
[3] {-man(z4), mortal(z4)} A
|
93 |
+
[4] {-man(socrates)} (1, 3)
|
94 |
+
[5] {mortal(socrates)} (2, 3)
|
95 |
+
[6] {} (1, 5)
|
96 |
+
<BLANKLINE>
|
97 |
+
|
98 |
+
The prover command stores the result of proving so that if ``prove()`` is
|
99 |
+
called again, then the command can return the result without executing the
|
100 |
+
prover again. This allows the user to access the result of the proof without
|
101 |
+
wasting time re-computing what it already knows.
|
102 |
+
|
103 |
+
>>> prover.prove()
|
104 |
+
True
|
105 |
+
>>> prover.prove()
|
106 |
+
True
|
107 |
+
|
108 |
+
The assumptions and goal may be accessed using the ``assumptions()`` and
|
109 |
+
``goal()`` methods, respectively.
|
110 |
+
|
111 |
+
>>> prover.assumptions()
|
112 |
+
[<ApplicationExpression man(socrates)>, <AllExpression all x.(man(x) -> mortal(x))>]
|
113 |
+
>>> prover.goal()
|
114 |
+
<ApplicationExpression mortal(socrates)>
|
115 |
+
|
116 |
+
The assumptions list may be modified using the ``add_assumptions()`` and
|
117 |
+
``retract_assumptions()`` methods. Both methods take a list of ``Expression``
|
118 |
+
objects. Since adding or removing assumptions may change the result of the
|
119 |
+
proof, the stored result is cleared when either of these methods are called.
|
120 |
+
That means that ``proof()`` will be unavailable until ``prove()`` is called and
|
121 |
+
a call to ``prove()`` will execute the theorem prover.
|
122 |
+
|
123 |
+
>>> prover.retract_assumptions([read_expr('man(socrates)')])
|
124 |
+
>>> print(prover.proof())
|
125 |
+
Traceback (most recent call last):
|
126 |
+
File "...", line 1212, in __run
|
127 |
+
compileflags, 1) in test.globs
|
128 |
+
File "<doctest nltk/test/inference.doctest[10]>", line 1, in <module>
|
129 |
+
File "...", line ..., in proof
|
130 |
+
raise LookupError("You have to call prove() first to get a proof!")
|
131 |
+
LookupError: You have to call prove() first to get a proof!
|
132 |
+
>>> prover.prove()
|
133 |
+
False
|
134 |
+
>>> print(prover.proof())
|
135 |
+
[1] {-mortal(socrates)} A
|
136 |
+
[2] {-man(z6), mortal(z6)} A
|
137 |
+
[3] {-man(socrates)} (1, 2)
|
138 |
+
<BLANKLINE>
|
139 |
+
>>> prover.add_assumptions([read_expr('man(socrates)')])
|
140 |
+
>>> prover.prove()
|
141 |
+
True
|
142 |
+
|
143 |
+
-------
|
144 |
+
Prover9
|
145 |
+
-------
|
146 |
+
|
147 |
+
Prover9 Installation
|
148 |
+
~~~~~~~~~~~~~~~~~~~~
|
149 |
+
|
150 |
+
You can download Prover9 from https://www.cs.unm.edu/~mccune/prover9/.
|
151 |
+
|
152 |
+
Extract the source code into a suitable directory and follow the
|
153 |
+
instructions in the Prover9 ``README.make`` file to compile the executables.
|
154 |
+
Install these into an appropriate location; the
|
155 |
+
``prover9_search`` variable is currently configured to look in the
|
156 |
+
following locations:
|
157 |
+
|
158 |
+
>>> p = Prover9()
|
159 |
+
>>> p.binary_locations()
|
160 |
+
['/usr/local/bin/prover9',
|
161 |
+
'/usr/local/bin/prover9/bin',
|
162 |
+
'/usr/local/bin',
|
163 |
+
'/usr/bin',
|
164 |
+
'/usr/local/prover9',
|
165 |
+
'/usr/local/share/prover9']
|
166 |
+
|
167 |
+
Alternatively, the environment variable ``PROVER9HOME`` may be configured with
|
168 |
+
the binary's location.
|
169 |
+
|
170 |
+
The path to the correct directory can be set manually in the following
|
171 |
+
manner:
|
172 |
+
|
173 |
+
>>> config_prover9(path='/usr/local/bin') # doctest: +SKIP
|
174 |
+
[Found prover9: /usr/local/bin/prover9]
|
175 |
+
|
176 |
+
If the executables cannot be found, ``Prover9`` will issue a warning message:
|
177 |
+
|
178 |
+
>>> p.prove() # doctest: +SKIP
|
179 |
+
Traceback (most recent call last):
|
180 |
+
...
|
181 |
+
LookupError:
|
182 |
+
===========================================================================
|
183 |
+
NLTK was unable to find the prover9 executable! Use config_prover9() or
|
184 |
+
set the PROVER9HOME environment variable.
|
185 |
+
<BLANKLINE>
|
186 |
+
>> config_prover9('/path/to/prover9')
|
187 |
+
<BLANKLINE>
|
188 |
+
For more information, on prover9, see:
|
189 |
+
<https://www.cs.unm.edu/~mccune/prover9/>
|
190 |
+
===========================================================================
|
191 |
+
|
192 |
+
|
193 |
+
Using Prover9
|
194 |
+
~~~~~~~~~~~~~
|
195 |
+
|
196 |
+
The general case in theorem proving is to determine whether ``S |- g``
|
197 |
+
holds, where ``S`` is a possibly empty set of assumptions, and ``g``
|
198 |
+
is a proof goal.
|
199 |
+
|
200 |
+
As mentioned earlier, NLTK input to ``Prover9`` must be
|
201 |
+
``Expression``\ s of ``nltk.sem.logic``. A ``Prover9`` instance is
|
202 |
+
initialized with a proof goal and, possibly, some assumptions. The
|
203 |
+
``prove()`` method attempts to find a proof of the goal, given the
|
204 |
+
list of assumptions (in this case, none).
|
205 |
+
|
206 |
+
>>> goal = read_expr('(man(x) <-> --man(x))')
|
207 |
+
>>> prover = Prover9Command(goal)
|
208 |
+
>>> prover.prove()
|
209 |
+
True
|
210 |
+
|
211 |
+
Given a ``ProverCommand`` instance ``prover``, the method
|
212 |
+
``prover.proof()`` will return a String of the extensive proof information
|
213 |
+
provided by Prover9, shown in abbreviated form here::
|
214 |
+
|
215 |
+
============================== Prover9 ===============================
|
216 |
+
Prover9 (32) version ...
|
217 |
+
Process ... was started by ... on ...
|
218 |
+
...
|
219 |
+
The command was ".../prover9 -f ...".
|
220 |
+
============================== end of head ===========================
|
221 |
+
|
222 |
+
============================== INPUT =================================
|
223 |
+
|
224 |
+
% Reading from file /var/...
|
225 |
+
|
226 |
+
|
227 |
+
formulas(goals).
|
228 |
+
(all x (man(x) -> man(x))).
|
229 |
+
end_of_list.
|
230 |
+
|
231 |
+
...
|
232 |
+
============================== end of search =========================
|
233 |
+
|
234 |
+
THEOREM PROVED
|
235 |
+
|
236 |
+
Exiting with 1 proof.
|
237 |
+
|
238 |
+
Process 6317 exit (max_proofs) Mon Jan 21 15:23:28 2008
|
239 |
+
|
240 |
+
|
241 |
+
As mentioned earlier, we may want to list some assumptions for
|
242 |
+
the proof, as shown here.
|
243 |
+
|
244 |
+
>>> g = read_expr('mortal(socrates)')
|
245 |
+
>>> a1 = read_expr('all x.(man(x) -> mortal(x))')
|
246 |
+
>>> prover = Prover9Command(g, assumptions=[a1])
|
247 |
+
>>> prover.print_assumptions()
|
248 |
+
all x.(man(x) -> mortal(x))
|
249 |
+
|
250 |
+
However, the assumptions are not sufficient to derive the goal:
|
251 |
+
|
252 |
+
>>> print(prover.prove())
|
253 |
+
False
|
254 |
+
|
255 |
+
So let's add another assumption:
|
256 |
+
|
257 |
+
>>> a2 = read_expr('man(socrates)')
|
258 |
+
>>> prover.add_assumptions([a2])
|
259 |
+
>>> prover.print_assumptions()
|
260 |
+
all x.(man(x) -> mortal(x))
|
261 |
+
man(socrates)
|
262 |
+
>>> print(prover.prove())
|
263 |
+
True
|
264 |
+
|
265 |
+
We can also show the assumptions in ``Prover9`` format.
|
266 |
+
|
267 |
+
>>> prover.print_assumptions(output_format='Prover9')
|
268 |
+
all x (man(x) -> mortal(x))
|
269 |
+
man(socrates)
|
270 |
+
|
271 |
+
>>> prover.print_assumptions(output_format='Spass')
|
272 |
+
Traceback (most recent call last):
|
273 |
+
. . .
|
274 |
+
NameError: Unrecognized value for 'output_format': Spass
|
275 |
+
|
276 |
+
Assumptions can be retracted from the list of assumptions.
|
277 |
+
|
278 |
+
>>> prover.retract_assumptions([a1])
|
279 |
+
>>> prover.print_assumptions()
|
280 |
+
man(socrates)
|
281 |
+
>>> prover.retract_assumptions([a1])
|
282 |
+
|
283 |
+
Statements can be loaded from a file and parsed. We can then add these
|
284 |
+
statements as new assumptions.
|
285 |
+
|
286 |
+
>>> g = read_expr('all x.(boxer(x) -> -boxerdog(x))')
|
287 |
+
>>> prover = Prover9Command(g)
|
288 |
+
>>> prover.prove()
|
289 |
+
False
|
290 |
+
>>> import nltk.data
|
291 |
+
>>> new = nltk.data.load('grammars/sample_grammars/background0.fol')
|
292 |
+
>>> for a in new:
|
293 |
+
... print(a)
|
294 |
+
all x.(boxerdog(x) -> dog(x))
|
295 |
+
all x.(boxer(x) -> person(x))
|
296 |
+
all x.-(dog(x) & person(x))
|
297 |
+
exists x.boxer(x)
|
298 |
+
exists x.boxerdog(x)
|
299 |
+
>>> prover.add_assumptions(new)
|
300 |
+
>>> print(prover.prove())
|
301 |
+
True
|
302 |
+
>>> print(prover.proof())
|
303 |
+
============================== prooftrans ============================
|
304 |
+
Prover9 (...) version ...
|
305 |
+
Process ... was started by ... on ...
|
306 |
+
...
|
307 |
+
The command was ".../prover9".
|
308 |
+
============================== end of head ===========================
|
309 |
+
<BLANKLINE>
|
310 |
+
============================== end of input ==========================
|
311 |
+
<BLANKLINE>
|
312 |
+
============================== PROOF =================================
|
313 |
+
<BLANKLINE>
|
314 |
+
% -------- Comments from original proof --------
|
315 |
+
% Proof 1 at ... seconds.
|
316 |
+
% Length of proof is 13.
|
317 |
+
% Level of proof is 4.
|
318 |
+
% Maximum clause weight is 0.
|
319 |
+
% Given clauses 0.
|
320 |
+
<BLANKLINE>
|
321 |
+
1 (all x (boxerdog(x) -> dog(x))). [assumption].
|
322 |
+
2 (all x (boxer(x) -> person(x))). [assumption].
|
323 |
+
3 (all x -(dog(x) & person(x))). [assumption].
|
324 |
+
6 (all x (boxer(x) -> -boxerdog(x))). [goal].
|
325 |
+
8 -boxerdog(x) | dog(x). [clausify(1)].
|
326 |
+
9 boxerdog(c3). [deny(6)].
|
327 |
+
11 -boxer(x) | person(x). [clausify(2)].
|
328 |
+
12 boxer(c3). [deny(6)].
|
329 |
+
14 -dog(x) | -person(x). [clausify(3)].
|
330 |
+
15 dog(c3). [resolve(9,a,8,a)].
|
331 |
+
18 person(c3). [resolve(12,a,11,a)].
|
332 |
+
19 -person(c3). [resolve(15,a,14,a)].
|
333 |
+
20 $F. [resolve(19,a,18,a)].
|
334 |
+
<BLANKLINE>
|
335 |
+
============================== end of proof ==========================
|
336 |
+
|
337 |
+
----------------------
|
338 |
+
The equiv() method
|
339 |
+
----------------------
|
340 |
+
|
341 |
+
One application of the theorem prover functionality is to check if
|
342 |
+
two Expressions have the same meaning.
|
343 |
+
The ``equiv()`` method calls a theorem prover to determine whether two
|
344 |
+
Expressions are logically equivalent.
|
345 |
+
|
346 |
+
>>> a = read_expr(r'exists x.(man(x) & walks(x))')
|
347 |
+
>>> b = read_expr(r'exists x.(walks(x) & man(x))')
|
348 |
+
>>> print(a.equiv(b))
|
349 |
+
True
|
350 |
+
|
351 |
+
The same method can be used on Discourse Representation Structures (DRSs).
|
352 |
+
In this case, each DRS is converted to a first order logic form, and then
|
353 |
+
passed to the theorem prover.
|
354 |
+
|
355 |
+
>>> dp = DrtParser()
|
356 |
+
>>> a = dp.parse(r'([x],[man(x), walks(x)])')
|
357 |
+
>>> b = dp.parse(r'([x],[walks(x), man(x)])')
|
358 |
+
>>> print(a.equiv(b))
|
359 |
+
True
|
360 |
+
|
361 |
+
|
362 |
+
--------------------------------
|
363 |
+
NLTK Interface to Model Builders
|
364 |
+
--------------------------------
|
365 |
+
|
366 |
+
The top-level to model builders is parallel to that for
|
367 |
+
theorem-provers. The ``ModelBuilder`` interface is located
|
368 |
+
in ``nltk.inference.api``. It is currently only implemented by
|
369 |
+
``Mace``, which interfaces with the Mace4 model builder.
|
370 |
+
|
371 |
+
Typically we use a model builder to show that some set of formulas has
|
372 |
+
a model, and is therefore consistent. One way of doing this is by
|
373 |
+
treating our candidate set of sentences as assumptions, and leaving
|
374 |
+
the goal unspecified.
|
375 |
+
Thus, the following interaction shows how both ``{a, c1}`` and ``{a, c2}``
|
376 |
+
are consistent sets, since Mace succeeds in a building a
|
377 |
+
model for each of them, while ``{c1, c2}`` is inconsistent.
|
378 |
+
|
379 |
+
>>> a3 = read_expr('exists x.(man(x) and walks(x))')
|
380 |
+
>>> c1 = read_expr('mortal(socrates)')
|
381 |
+
>>> c2 = read_expr('-mortal(socrates)')
|
382 |
+
>>> mace = Mace()
|
383 |
+
>>> print(mace.build_model(None, [a3, c1]))
|
384 |
+
True
|
385 |
+
>>> print(mace.build_model(None, [a3, c2]))
|
386 |
+
True
|
387 |
+
|
388 |
+
We can also use the model builder as an adjunct to theorem prover.
|
389 |
+
Let's suppose we are trying to prove ``S |- g``, i.e. that ``g``
|
390 |
+
is logically entailed by assumptions ``S = {s1, s2, ..., sn}``.
|
391 |
+
We can this same input to Mace4, and the model builder will try to
|
392 |
+
find a counterexample, that is, to show that ``g`` does *not* follow
|
393 |
+
from ``S``. So, given this input, Mace4 will try to find a model for
|
394 |
+
the set ``S' = {s1, s2, ..., sn, (not g)}``. If ``g`` fails to follow
|
395 |
+
from ``S``, then Mace4 may well return with a counterexample faster
|
396 |
+
than Prover9 concludes that it cannot find the required proof.
|
397 |
+
Conversely, if ``g`` *is* provable from ``S``, Mace4 may take a long
|
398 |
+
time unsuccessfully trying to find a counter model, and will eventually give up.
|
399 |
+
|
400 |
+
In the following example, we see that the model builder does succeed
|
401 |
+
in building a model of the assumptions together with the negation of
|
402 |
+
the goal. That is, it succeeds in finding a model
|
403 |
+
where there is a woman that every man loves; Adam is a man; Eve is a
|
404 |
+
woman; but Adam does not love Eve.
|
405 |
+
|
406 |
+
>>> a4 = read_expr('exists y. (woman(y) & all x. (man(x) -> love(x,y)))')
|
407 |
+
>>> a5 = read_expr('man(adam)')
|
408 |
+
>>> a6 = read_expr('woman(eve)')
|
409 |
+
>>> g = read_expr('love(adam,eve)')
|
410 |
+
>>> print(mace.build_model(g, [a4, a5, a6]))
|
411 |
+
True
|
412 |
+
|
413 |
+
The Model Builder will fail to find a model if the assumptions do entail
|
414 |
+
the goal. Mace will continue to look for models of ever-increasing sizes
|
415 |
+
until the end_size number is reached. By default, end_size is 500,
|
416 |
+
but it can be set manually for quicker response time.
|
417 |
+
|
418 |
+
>>> a7 = read_expr('all x.(man(x) -> mortal(x))')
|
419 |
+
>>> a8 = read_expr('man(socrates)')
|
420 |
+
>>> g2 = read_expr('mortal(socrates)')
|
421 |
+
>>> print(Mace(end_size=50).build_model(g2, [a7, a8]))
|
422 |
+
False
|
423 |
+
|
424 |
+
There is also a ``ModelBuilderCommand`` class that, like ``ProverCommand``,
|
425 |
+
stores a ``ModelBuilder``, a goal, assumptions, a result, and a model. The
|
426 |
+
only implementation in NLTK is ``MaceCommand``.
|
427 |
+
|
428 |
+
|
429 |
+
-----
|
430 |
+
Mace4
|
431 |
+
-----
|
432 |
+
|
433 |
+
Mace4 Installation
|
434 |
+
~~~~~~~~~~~~~~~~~~
|
435 |
+
|
436 |
+
Mace4 is packaged with Prover9, and can be downloaded from the same
|
437 |
+
source, namely https://www.cs.unm.edu/~mccune/prover9/. It is installed
|
438 |
+
in the same manner as Prover9.
|
439 |
+
|
440 |
+
Using Mace4
|
441 |
+
~~~~~~~~~~~
|
442 |
+
|
443 |
+
Check whether Mace4 can find a model.
|
444 |
+
|
445 |
+
>>> a = read_expr('(see(mary,john) & -(mary = john))')
|
446 |
+
>>> mb = MaceCommand(assumptions=[a])
|
447 |
+
>>> mb.build_model()
|
448 |
+
True
|
449 |
+
|
450 |
+
Show the model in 'tabular' format.
|
451 |
+
|
452 |
+
>>> print(mb.model(format='tabular'))
|
453 |
+
% number = 1
|
454 |
+
% seconds = 0
|
455 |
+
<BLANKLINE>
|
456 |
+
% Interpretation of size 2
|
457 |
+
<BLANKLINE>
|
458 |
+
john : 0
|
459 |
+
<BLANKLINE>
|
460 |
+
mary : 1
|
461 |
+
<BLANKLINE>
|
462 |
+
see :
|
463 |
+
| 0 1
|
464 |
+
---+----
|
465 |
+
0 | 0 0
|
466 |
+
1 | 1 0
|
467 |
+
<BLANKLINE>
|
468 |
+
|
469 |
+
Show the model in 'tabular' format.
|
470 |
+
|
471 |
+
>>> print(mb.model(format='cooked'))
|
472 |
+
% number = 1
|
473 |
+
% seconds = 0
|
474 |
+
<BLANKLINE>
|
475 |
+
% Interpretation of size 2
|
476 |
+
<BLANKLINE>
|
477 |
+
john = 0.
|
478 |
+
<BLANKLINE>
|
479 |
+
mary = 1.
|
480 |
+
<BLANKLINE>
|
481 |
+
- see(0,0).
|
482 |
+
- see(0,1).
|
483 |
+
see(1,0).
|
484 |
+
- see(1,1).
|
485 |
+
<BLANKLINE>
|
486 |
+
|
487 |
+
The property ``valuation`` accesses the stored ``Valuation``.
|
488 |
+
|
489 |
+
>>> print(mb.valuation)
|
490 |
+
{'john': 'a', 'mary': 'b', 'see': {('b', 'a')}}
|
491 |
+
|
492 |
+

We can return to our earlier example and inspect the model:

    >>> mb = MaceCommand(g, assumptions=[a4, a5, a6])
    >>> m = mb.build_model()
    >>> print(mb.model(format='cooked'))
    % number = 1
    % seconds = 0
    <BLANKLINE>
    % Interpretation of size 2
    <BLANKLINE>
    adam = 0.
    <BLANKLINE>
    eve = 0.
    <BLANKLINE>
    c1 = 1.
    <BLANKLINE>
    man(0).
    - man(1).
    <BLANKLINE>
    woman(0).
    woman(1).
    <BLANKLINE>
    - love(0,0).
    love(0,1).
    - love(1,0).
    - love(1,1).
    <BLANKLINE>

Here, we can see that ``adam`` and ``eve`` have both been assigned the same
individual, ``0``, as their value; ``0`` is both a man and a woman; a second
individual, ``1``, is also a woman; and ``0`` loves ``1``.  Thus, this is
an interpretation in which there is a woman that every man loves, but
Adam doesn't love Eve.
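
The corresponding ``Valuation`` can be inspected as before (a sketch: the
exact rendering, in particular of the Skolem constant ``c1``, may vary,
so the line is skipped by default):

    >>> print(mb.valuation)  # doctest: +SKIP
    {'adam': 'a', 'c1': 'b', 'eve': 'a', 'love': {('a', 'b')}, 'man': {('a',)}, 'woman': {('a',), ('b',)}}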

Mace can also be used with propositional logic.

    >>> p = read_expr('P')
    >>> q = read_expr('Q')
    >>> mb = MaceCommand(q, [p, p>-q])
    >>> mb.build_model()
    True
    >>> mb.valuation['P']
    True
    >>> mb.valuation['Q']
    False
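
If the goal already follows from the assumptions, no countermodel can be
found (a sketch along the same lines; skipped by default):

    >>> mb = MaceCommand(q, [p, p>q])
    >>> mb.build_model()  # doctest: +SKIP
    False
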
venv/lib/python3.10/site-packages/nltk/test/internals.doctest
ADDED
@@ -0,0 +1,161 @@
.. Copyright (C) 2001-2023 NLTK Project
.. For license information, see LICENSE.TXT

==========================================
 Unit tests for the nltk.internals module
==========================================

overridden()
~~~~~~~~~~~~
    >>> from nltk.internals import overridden

The typical use case is in defining methods for an interface or
abstract base class, in such a way that subclasses don't have to
implement all of the methods:

    >>> class EaterI(object):
    ...     '''Subclass must define eat() or batch_eat().'''
    ...     def eat(self, food):
    ...         if overridden(self.batch_eat):
    ...             return self.batch_eat([food])[0]
    ...         else:
    ...             raise NotImplementedError()
    ...     def batch_eat(self, foods):
    ...         return [self.eat(food) for food in foods]

As long as a subclass implements one method, it will be used to
perform the other method:

    >>> class GoodEater1(EaterI):
    ...     def eat(self, food):
    ...         return 'yum'
    >>> GoodEater1().eat('steak')
    'yum'
    >>> GoodEater1().batch_eat(['steak', 'peas'])
    ['yum', 'yum']

    >>> class GoodEater2(EaterI):
    ...     def batch_eat(self, foods):
    ...         return ['yum' for food in foods]
    >>> GoodEater2().eat('steak')
    'yum'
    >>> GoodEater2().batch_eat(['steak', 'peas'])
    ['yum', 'yum']

But if a subclass doesn't implement either one, then callers will get an
error when they try to use these methods (n.b., this is better than
infinite recursion):

    >>> class BadEater1(EaterI):
    ...     pass
    >>> BadEater1().eat('steak')
    Traceback (most recent call last):
      . . .
    NotImplementedError
    >>> BadEater1().batch_eat(['steak', 'peas'])
    Traceback (most recent call last):
      . . .
    NotImplementedError

Trying to use the abstract base class itself will also result in an
error:

    >>> class EaterI(EaterI):
    ...     pass
    >>> EaterI().eat('steak')
    Traceback (most recent call last):
      . . .
    NotImplementedError
    >>> EaterI().batch_eat(['steak', 'peas'])
    Traceback (most recent call last):
      . . .
    NotImplementedError

It's ok to use intermediate abstract classes:

    >>> class AbstractEater(EaterI):
    ...     pass

    >>> class GoodEater3(AbstractEater):
    ...     def eat(self, food):
    ...         return 'yum'
    ...
    >>> GoodEater3().eat('steak')
    'yum'
    >>> GoodEater3().batch_eat(['steak', 'peas'])
    ['yum', 'yum']

    >>> class GoodEater4(AbstractEater):
    ...     def batch_eat(self, foods):
    ...         return ['yum' for food in foods]
    >>> GoodEater4().eat('steak')
    'yum'
    >>> GoodEater4().batch_eat(['steak', 'peas'])
    ['yum', 'yum']

    >>> class BadEater2(AbstractEater):
    ...     pass
    >>> BadEater2().eat('steak')
    Traceback (most recent call last):
      . . .
    NotImplementedError
    >>> BadEater2().batch_eat(['steak', 'peas'])
    Traceback (most recent call last):
      . . .
    NotImplementedError

Here are some extra tests:

    >>> class A(object):
    ...     def f(x): pass
    >>> class B(A):
    ...     def f(x): pass
    >>> class C(A): pass
    >>> class D(B): pass

    >>> overridden(A().f)
    False
    >>> overridden(B().f)
    True
    >>> overridden(C().f)
    False
    >>> overridden(D().f)
    True

It works for classic classes, too (under Python 3 these are ordinary
new-style classes, so the results are identical):

    >>> class A:
    ...     def f(x): pass
    >>> class B(A):
    ...     def f(x): pass
    >>> class C(A): pass
    >>> class D(B): pass
    >>> overridden(A().f)
    False
    >>> overridden(B().f)
    True
    >>> overridden(C().f)
    False
    >>> overridden(D().f)
    True
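
Under the hood, ``overridden`` has to decide whether a bound method's
underlying function was re-defined below the class that first declared it.
A rough, hypothetical sketch of that idea (not NLTK's actual
implementation) compares the bound method's function with the one defined
on a given base class:

    >>> def is_overridden(bound_method, base):
    ...     # Hypothetical helper: the method counts as overridden if its
    ...     # underlying function is not the one defined on `base`.
    ...     return bound_method.__func__ is not getattr(base, bound_method.__name__)
    >>> is_overridden(B().f, A)
    True
    >>> is_overridden(C().f, A)
    False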

read_str()
~~~~~~~~~~
    >>> from nltk.internals import read_str

Test valid scenarios:

    >>> read_str("'valid string'", 0)
    ('valid string', 14)

Now test invalid scenarios:

    >>> read_str("should error", 0)
    Traceback (most recent call last):
    ...
    nltk.internals.ReadError: Expected open quote at 0
    >>> read_str("'should error", 0)
    Traceback (most recent call last):
    ...
    nltk.internals.ReadError: Expected close quote at 1
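
``read_str`` also accepts a nonzero start position, and the returned
index points just past the closing quote (a sketch inferred from the
behaviour shown above; skipped by default):

    >>> read_str("x = 'hello'", 4)  # doctest: +SKIP
    ('hello', 11)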