"""Fonduer paragraph mention model."""
from typing import Any, Dict, Type
from sqlalchemy import Column, ForeignKey, Integer, UniqueConstraint
from sqlalchemy.orm import relationship
from fonduer.candidates.models.temporary_context import TemporaryContext
from fonduer.parser.models import Paragraph
from fonduer.parser.models.context import Context
from fonduer.parser.models.utils import construct_stable_id
class TemporaryParagraphMention(TemporaryContext):
"""The TemporaryContext version of ParagraphMention."""
def __init__(self, paragraph: Paragraph) -> None:
"""Initialize TemporaryParagraphMention."""
super().__init__()
self.paragraph = paragraph # The paragraph Context
def __len__(self) -> int:
"""Get the length of the mention."""
return 1
def __eq__(self, other: object) -> bool:
"""Check if the mention is equal to another mention."""
if not isinstance(other, TemporaryParagraphMention):
return NotImplemented
return self.paragraph == other.paragraph
def __ne__(self, other: object) -> bool:
"""Check if the mention is not equal to another mention."""
if not isinstance(other, TemporaryParagraphMention):
return NotImplemented
return self.paragraph != other.paragraph
def __gt__(self, other: object) -> bool:
"""Check if the mention is greater than another mention."""
if not isinstance(other, TemporaryParagraphMention):
return NotImplemented
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
def __contains__(self, other: object) -> bool:
"""Check if the mention contains another mention."""
if not isinstance(other, TemporaryParagraphMention):
return NotImplemented
return self.__eq__(other)
def __hash__(self) -> int:
"""Get the hash value of mention."""
return hash(self.paragraph)
def get_stable_id(self) -> str:
"""Return a stable id."""
return construct_stable_id(
self.paragraph, self._get_polymorphic_identity(), 0, 0
)
def _get_table(self) -> Type["ParagraphMention"]:
return ParagraphMention
def _get_polymorphic_identity(self) -> str:
return "paragraph_mention"
def _get_insert_args(self) -> Dict[str, Any]:
return {"paragraph_id": self.paragraph.id}
def __repr__(self) -> str:
"""Represent the mention as a string."""
return (
f"{self.__class__.__name__}"
f"("
f"document={self.paragraph.document.name}, "
f"position={self.paragraph.position}"
f")"
)
def _get_instance(self, **kwargs: Any) -> "TemporaryParagraphMention":
return TemporaryParagraphMention(**kwargs)
class ParagraphMention(Context, TemporaryParagraphMention):
"""A paragraph ``Mention``."""
__tablename__ = "paragraph_mention"
#: The unique id of the ``ParagraphMention``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The id of the parent ``Paragraph``.
paragraph_id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"))
#: The parent ``Paragraph``.
paragraph = relationship("Context", foreign_keys=paragraph_id)
__table_args__ = (UniqueConstraint(paragraph_id),)
__mapper_args__ = {
"polymorphic_identity": "paragraph_mention",
"inherit_condition": (id == Context.id),
}
def __init__(self, tc: TemporaryParagraphMention):
"""Initialize ParagraphMention."""
self.stable_id = tc.get_stable_id()
self.paragraph = tc.paragraph
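
# A minimal usage sketch of the temporary-vs-persisted pattern above. The
# helper below is illustrative only (not part of Fonduer's API); it assumes
# `paragraph` is a Paragraph from a Document already parsed by Fonduer, and
# leaves persisting the returned object to a session up to the caller.
def _example_promote_paragraph(paragraph: Paragraph) -> ParagraphMention:
    tmp = TemporaryParagraphMention(paragraph)
    # Temporary mentions define equality and hashing, so duplicates can be
    # filtered in a set before being materialized.
    assert tmp == TemporaryParagraphMention(paragraph)
    return ParagraphMention(tmp)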
| fonduer-master | src/fonduer/candidates/models/paragraph_mention.py |
"""Fonduer implicit span mention model."""
from typing import Any, Dict, List, Optional, Type
from sqlalchemy import Column, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import backref, relationship
from sqlalchemy.sql import text
from sqlalchemy.types import PickleType
from fonduer.candidates.models.span_mention import TemporarySpanMention
from fonduer.parser.models.context import Context
from fonduer.parser.models.sentence import Sentence
from fonduer.parser.models.utils import split_stable_id
class TemporaryImplicitSpanMention(TemporarySpanMention):
"""The TemporaryContext version of ImplicitSpanMention."""
def __init__(
self,
sentence: Sentence,
char_start: int,
char_end: int,
expander_key: str,
position: int,
text: str,
words: List[str],
lemmas: List[str],
pos_tags: List[str],
ner_tags: List[str],
dep_parents: List[int],
dep_labels: List[str],
page: List[Optional[int]],
top: List[Optional[int]],
left: List[Optional[int]],
bottom: List[Optional[int]],
right: List[Optional[int]],
meta: Any = None,
) -> None:
"""Initialize TemporaryImplicitSpanMention."""
super().__init__(sentence, char_start, char_end, meta)
self.expander_key = expander_key
self.position = position
self.text = text
self.words = words
self.lemmas = lemmas
self.pos_tags = pos_tags
self.ner_tags = ner_tags
self.dep_parents = dep_parents
self.dep_labels = dep_labels
self.page = page
self.top = top
self.left = left
self.bottom = bottom
self.right = right
def __len__(self) -> int:
"""Get the length of the mention."""
return sum(map(len, self.words))
def __eq__(self, other: object) -> bool:
"""Check if the mention is equal to another mention."""
if not isinstance(other, TemporaryImplicitSpanMention):
return NotImplemented
return (
self.sentence == other.sentence
and self.char_start == other.char_start
and self.char_end == other.char_end
and self.expander_key == other.expander_key
and self.position == other.position
)
def __ne__(self, other: object) -> bool:
"""Check if the mention is not equal to another mention."""
if not isinstance(other, TemporaryImplicitSpanMention):
return NotImplemented
return (
self.sentence != other.sentence
or self.char_start != other.char_start
or self.char_end != other.char_end
or self.expander_key != other.expander_key
or self.position != other.position
)
def __hash__(self) -> int:
"""Get the hash value of mention."""
return (
hash(self.sentence)
+ hash(self.char_start)
+ hash(self.char_end)
+ hash(self.expander_key)
+ hash(self.position)
)
def get_stable_id(self) -> str:
"""Return a stable id."""
doc_id, _, idx = split_stable_id(self.sentence.stable_id)
parent_doc_char_start = idx[0]
return (
f"{self.sentence.document.name}"
f"::"
f"{self._get_polymorphic_identity()}"
f":"
f"{parent_doc_char_start + self.char_start}"
f":"
f"{parent_doc_char_start + self.char_end}"
f":"
f"{self.expander_key}"
f":"
f"{self.position}"
)
def _get_table(self) -> Type["ImplicitSpanMention"]:
return ImplicitSpanMention
def _get_polymorphic_identity(self) -> str:
return "implicit_span_mention"
def _get_insert_args(self) -> Dict[str, Any]:
return {
"sentence_id": self.sentence.id,
"char_start": self.char_start,
"char_end": self.char_end,
"expander_key": self.expander_key,
"position": self.position,
"text": self.text,
"words": self.words,
"lemmas": self.lemmas,
"pos_tags": self.pos_tags,
"ner_tags": self.ner_tags,
"dep_parents": self.dep_parents,
"dep_labels": self.dep_labels,
"page": self.page,
"top": self.top,
"left": self.left,
"bottom": self.bottom,
"right": self.right,
"meta": self.meta,
}
def get_attrib_tokens(self, a: str = "words") -> List:
"""Get the tokens of sentence attribute *a*.
Intuitively, like calling::
implicit_span.a
:param a: The attribute to get tokens for.
:return: The tokens of sentence attribute defined by *a* for the span.
"""
return self.__getattribute__(a)
def get_attrib_span(self, a: str, sep: str = "") -> str:
"""Get the span of sentence attribute *a*.
Intuitively, like calling::
sep.join(implicit_span.a)
:param a: The attribute to get a span for.
:param sep: The separator to use for the join,
or to be removed from text if a="words".
:return: The joined tokens, or text if a="words".
"""
if a == "words":
return self.text.replace(sep, "")
else:
return sep.join([str(n) for n in self.get_attrib_tokens(a)])
def __getitem__(self, key: slice) -> "TemporaryImplicitSpanMention":
"""Slice operation returns a new candidate sliced according to **char index**.
Note that the slicing is w.r.t. the candidate range (not the abs.
sentence char indexing)
"""
if isinstance(key, slice):
char_start = (
self.char_start if key.start is None else self.char_start + key.start
)
if key.stop is None:
char_end = self.char_end
elif key.stop >= 0:
char_end = self.char_start + key.stop - 1
else:
char_end = self.char_end + key.stop
return self._get_instance(
sentence=self.sentence,
char_start=char_start,
char_end=char_end,
expander_key=self.expander_key,
position=self.position,
text=self.text,
words=self.words,
lemmas=self.lemmas,
pos_tags=self.pos_tags,
ner_tags=self.ner_tags,
dep_parents=self.dep_parents,
dep_labels=self.dep_labels,
page=self.page,
top=self.top,
left=self.left,
bottom=self.bottom,
right=self.right,
meta=self.meta,
)
else:
raise NotImplementedError()
def __repr__(self) -> str:
"""Represent the mention as a string."""
return (
f"{self.__class__.__name__}"
f"("
f'"{self.get_span()}", '
f"sentence={self.sentence.id}, "
f"words=[{self.get_word_start_index()},{self.get_word_end_index()}], "
f"position=[{self.position}]"
f")"
)
def _get_instance(self, **kwargs: Any) -> "TemporaryImplicitSpanMention":
return TemporaryImplicitSpanMention(**kwargs)
class ImplicitSpanMention(Context, TemporaryImplicitSpanMention):
"""A span of characters that may not appear verbatim in the source text.
It is identified by Context id, character-index start and end (inclusive),
as well as a key representing what 'expander' function drew the ImplicitSpanMention
from an existing SpanMention, and a position (where position=0 corresponds to the
first ImplicitSpanMention produced from the expander function).
The character-index start and end point to the segment of text that was
expanded to produce the ImplicitSpanMention.
"""
__tablename__ = "implicit_span_mention"
#: The unique id of the ``ImplicitSpanMention``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The id of the parent ``Sentence``.
sentence_id = Column(
Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True
)
#: The parent ``Sentence``.
sentence = relationship(
"Context",
backref=backref("implicit_spans", cascade="all, delete-orphan"),
foreign_keys=sentence_id,
)
#: The starting character-index of the ``ImplicitSpanMention``.
char_start = Column(Integer, nullable=False)
#: The ending character-index of the ``ImplicitSpanMention`` (inclusive).
char_end = Column(Integer, nullable=False)
#: The key representing the expander function which produced this
#: ``ImplicitSpanMention``.
expander_key = Column(String, nullable=False)
#: The position of the ``ImplicitSpanMention`` where position=0 is the first
#: ``ImplicitSpanMention`` produced by the expander.
position = Column(Integer, nullable=False)
#: The raw text of the ``ImplicitSpanMention``.
text = Column(String)
#: A list of the words in the ``ImplicitSpanMention``.
words = Column(postgresql.ARRAY(String), nullable=False)
#: A list of the lemmas for each word in the ``ImplicitSpanMention``.
lemmas = Column(postgresql.ARRAY(String))
#: A list of the POS tags for each word in the ``ImplicitSpanMention``.
pos_tags = Column(postgresql.ARRAY(String))
#: A list of the NER tags for each word in the ``ImplicitSpanMention``.
ner_tags = Column(postgresql.ARRAY(String))
#: A list of the dependency parents for each word in the ``ImplicitSpanMention``.
dep_parents = Column(postgresql.ARRAY(Integer))
#: A list of the dependency labels for each word in the ``ImplicitSpanMention``.
dep_labels = Column(postgresql.ARRAY(String))
#: A list of the page number of each word in the ``ImplicitSpanMention``.
page = Column(postgresql.ARRAY(Integer))
#: A list of each word's TOP bounding box coordinate in the
#: ``ImplicitSpanMention``.
top = Column(postgresql.ARRAY(Integer))
#: A list of each word's LEFT bounding box coordinate in the
#: ``ImplicitSpanMention``.
left = Column(postgresql.ARRAY(Integer))
#: A list of each word's BOTTOM bounding box coordinate in the
#: ``ImplicitSpanMention``.
bottom = Column(postgresql.ARRAY(Integer))
#: A list of each word's RIGHT bounding box coordinate in the
#: ``ImplicitSpanMention``.
right = Column(postgresql.ARRAY(Integer))
#: Pickled metadata about the ``ImplicitSpanMention``.
meta = Column(PickleType)
__table_args__ = (
UniqueConstraint(sentence_id, char_start, char_end, expander_key, position),
)
__mapper_args__ = {
"polymorphic_identity": "implicit_span_mention",
"inherit_condition": (id == Context.id),
}
def __init__(self, tc: TemporaryImplicitSpanMention):
"""Initialize ImplicitSpanMention."""
self.stable_id = tc.get_stable_id()
self.sentence = tc.sentence
self.char_start = tc.char_start
self.char_end = tc.char_end
self.expander_key = tc.expander_key
self.position = tc.position
self.text = tc.text
self.words = tc.words
self.lemmas = tc.lemmas
self.pos_tags = tc.pos_tags
self.ner_tags = tc.ner_tags
self.dep_parents = tc.dep_parents
self.dep_labels = tc.dep_labels
self.page = tc.page
self.top = tc.top
self.left = tc.left
self.bottom = tc.bottom
self.right = tc.right
self.meta = tc.meta
def _get_instance(self, **kwargs: Any) -> "ImplicitSpanMention":
return ImplicitSpanMention(**kwargs)
# We redefine these to use default semantics, overriding the operators
# inherited from TemporarySpan
def __eq__(self, other: object) -> bool:
"""Check if the mention is equal to another mention."""
if not isinstance(other, ImplicitSpanMention):
return NotImplemented
return self is other
def __ne__(self, other: object) -> bool:
"""Check if the mention is not equal to another mention."""
if not isinstance(other, ImplicitSpanMention):
return NotImplemented
return self is not other
def __hash__(self) -> int:
"""Get the hash value of mention."""
return id(self)
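
# A minimal sketch of how an 'expander' might wrap a variant string that does
# not appear verbatim in the sentence (e.g. "BC546B" drawn from a span reading
# "BC546A/B") as an implicit span. The helper, the expander key
# "example_expander", the linguistic tags, and the unset visual coordinates
# are illustrative placeholders only, not part of Fonduer's API.
def _example_expand_span(
    span: TemporarySpanMention, variant: str, position: int
) -> TemporaryImplicitSpanMention:
    return TemporaryImplicitSpanMention(
        sentence=span.sentence,
        char_start=span.char_start,
        char_end=span.char_end,
        expander_key="example_expander",
        position=position,
        text=variant,
        words=[variant],
        lemmas=[variant.lower()],
        pos_tags=["NN"],
        ner_tags=["O"],
        dep_parents=[0],
        dep_labels=["dep"],
        page=[None],  # visual coordinates left unset in this sketch
        top=[None],
        left=[None],
        bottom=[None],
        right=[None],
        meta=None,
    )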
| fonduer-master | src/fonduer/candidates/models/implicit_span_mention.py |
"""Fonduer's candidate model module."""
from fonduer.candidates.models.candidate import Candidate, candidate_subclass
from fonduer.candidates.models.caption_mention import CaptionMention
from fonduer.candidates.models.cell_mention import CellMention
from fonduer.candidates.models.document_mention import DocumentMention
from fonduer.candidates.models.figure_mention import FigureMention
from fonduer.candidates.models.implicit_span_mention import ImplicitSpanMention
from fonduer.candidates.models.mention import Mention, mention_subclass
from fonduer.candidates.models.paragraph_mention import ParagraphMention
from fonduer.candidates.models.section_mention import SectionMention
from fonduer.candidates.models.span_mention import SpanMention
from fonduer.candidates.models.table_mention import TableMention
__all__ = [
"Candidate",
"CaptionMention",
"CellMention",
"DocumentMention",
"FigureMention",
"ImplicitSpanMention",
"Mention",
"ParagraphMention",
"SectionMention",
"SpanMention",
"TableMention",
"candidate_subclass",
"mention_subclass",
]
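
# A minimal sketch of how the two factories exported above are typically
# combined to declare a relation schema. The names "Part", "Temp", and
# "PartTemp" are hypothetical, and a Fonduer engine must be initialized for
# the backing tables to actually be created (without one, only the classes
# are defined).
def _example_declare_relation():
    Part = mention_subclass("Part")
    Temp = mention_subclass("Temp")
    return candidate_subclass("PartTemp", [Part, Temp])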
| fonduer-master | src/fonduer/candidates/models/__init__.py |
"""Fonduer caption mention model."""
from typing import Any, Dict, Type
from sqlalchemy import Column, ForeignKey, Integer, UniqueConstraint
from sqlalchemy.orm import relationship
from fonduer.candidates.models.temporary_context import TemporaryContext
from fonduer.parser.models import Caption
from fonduer.parser.models.context import Context
from fonduer.parser.models.utils import construct_stable_id
class TemporaryCaptionMention(TemporaryContext):
"""The TemporaryContext version of CaptionMention."""
def __init__(self, caption: Caption) -> None:
"""Initialize TemporaryCaptionMention."""
super().__init__()
self.caption = caption # The caption Context
def __len__(self) -> int:
"""Get the length of the mention."""
return 1
def __eq__(self, other: object) -> bool:
"""Check if the mention is equal to another mention."""
if not isinstance(other, TemporaryCaptionMention):
return NotImplemented
return self.caption == other.caption
def __ne__(self, other: object) -> bool:
"""Check if the mention is not equal to another mention."""
if not isinstance(other, TemporaryCaptionMention):
return NotImplemented
return self.caption != other.caption
def __gt__(self, other: object) -> bool:
"""Check if the mention is greater than another mention."""
if not isinstance(other, TemporaryCaptionMention):
return NotImplemented
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
def __contains__(self, other: object) -> bool:
"""Check if the mention contains another mention."""
if not isinstance(other, TemporaryCaptionMention):
return NotImplemented
return self.__eq__(other)
def __hash__(self) -> int:
"""Get the hash value of mention."""
return hash(self.caption)
def get_stable_id(self) -> str:
"""Return a stable id."""
return construct_stable_id(self.caption, self._get_polymorphic_identity(), 0, 0)
def _get_table(self) -> Type["CaptionMention"]:
return CaptionMention
def _get_polymorphic_identity(self) -> str:
return "caption_mention"
def _get_insert_args(self) -> Dict[str, Any]:
return {"caption_id": self.caption.id}
def __repr__(self) -> str:
"""Represent the mention as a string."""
return (
f"{self.__class__.__name__}("
f"document={self.caption.document.name}, "
f"position={self.caption.position}"
f")"
)
def _get_instance(self, **kwargs: Any) -> "TemporaryCaptionMention":
return TemporaryCaptionMention(**kwargs)
class CaptionMention(Context, TemporaryCaptionMention):
"""A caption ``Mention``."""
__tablename__ = "caption_mention"
#: The unique id of the ``CaptionMention``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The id of the parent ``Caption``.
caption_id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"))
#: The parent ``Caption``.
caption = relationship("Context", foreign_keys=caption_id)
__table_args__ = (UniqueConstraint(caption_id),)
__mapper_args__ = {
"polymorphic_identity": "caption_mention",
"inherit_condition": (id == Context.id),
}
def __init__(self, tc: TemporaryCaptionMention):
"""Initialize CaptionMention."""
self.stable_id = tc.get_stable_id()
self.caption = tc.caption
| fonduer-master | src/fonduer/candidates/models/caption_mention.py |
"""Fonduer span mention model."""
from typing import Any, Dict, List, Optional, Type
from sqlalchemy import Column, ForeignKey, Integer, UniqueConstraint
from sqlalchemy.orm import backref, relationship
from sqlalchemy.types import PickleType
from fonduer.candidates.models.temporary_context import TemporaryContext
from fonduer.parser.models.context import Context
from fonduer.parser.models.sentence import Sentence
from fonduer.parser.models.utils import construct_stable_id
from fonduer.utils.utils_visual import Bbox
class TemporarySpanMention(TemporaryContext):
"""The TemporaryContext version of Span."""
def __init__(
self,
sentence: Sentence,
char_start: int,
char_end: int,
meta: Optional[Any] = None,
) -> None:
"""Initialize TemporarySpanMention."""
super().__init__()
self.sentence = sentence # The sentence Context of the Span
self.char_start = char_start
self.char_end = char_end
self.meta = meta
def __len__(self) -> int:
"""Get the length of the mention."""
return self.char_end - self.char_start + 1
def __eq__(self, other: object) -> bool:
"""Check if the mention is equal to another mention."""
if not isinstance(other, TemporarySpanMention):
return NotImplemented
return (
self.sentence == other.sentence
and self.char_start == other.char_start
and self.char_end == other.char_end
)
def __ne__(self, other: object) -> bool:
"""Check if the mention is not equal to another mention."""
if not isinstance(other, TemporarySpanMention):
return NotImplemented
return (
self.sentence != other.sentence
or self.char_start != other.char_start
or self.char_end != other.char_end
)
def __hash__(self) -> int:
"""Get the hash value of mention."""
return hash(self.sentence) + hash(self.char_start) + hash(self.char_end)
def get_stable_id(self) -> str:
"""Return a stable id."""
return construct_stable_id(
self.sentence,
self._get_polymorphic_identity(),
self.char_start,
self.char_end,
)
def _get_table(self) -> Type["SpanMention"]:
return SpanMention
def _get_polymorphic_identity(self) -> str:
return "span_mention"
def _get_insert_args(self) -> Dict[str, Any]:
return {
"sentence_id": self.sentence.id,
"char_start": self.char_start,
"char_end": self.char_end,
"meta": self.meta,
}
def get_word_start_index(self) -> int:
"""Get the index of the starting word of the span.
:return: The word-index of the start of the span.
"""
return self._char_to_word_index(self.char_start)
def get_word_end_index(self) -> int:
"""Get the index of the ending word of the span.
:return: The word-index of the last word of the span.
"""
return self._char_to_word_index(self.char_end)
def get_num_words(self) -> int:
"""Get the number of words in the span.
:return: The number of words in the span (n of the ngrams).
"""
return self.get_word_end_index() - self.get_word_start_index() + 1
def _char_to_word_index(self, ci: int) -> int:
"""Return the index of the **word this char is in**.
:param ci: The character-level index of the char.
:return: The word-level index the char was in.
"""
i = None
for i, co in enumerate(self.sentence.char_offsets):
if ci == co:
return i
elif ci < co:
return i - 1
return i
def _word_to_char_index(self, wi: int) -> int:
"""Return the character-level index (offset) of the word's start.
:param wi: The word-index.
:return: The character-level index of the word's start.
"""
return self.sentence.char_offsets[wi]
def get_attrib_tokens(self, a: str = "words") -> List:
"""Get the tokens of sentence attribute *a*.
Intuitively, like calling::
span.a
:param a: The attribute to get tokens for.
:return: The tokens of sentence attribute defined by *a* for the span.
"""
return self.sentence.__getattribute__(a)[
self.get_word_start_index() : self.get_word_end_index() + 1
]
def get_attrib_span(self, a: str, sep: str = "") -> str:
"""Get the span of sentence attribute *a*.
Intuitively, like calling::
sep.join(span.a)
:param a: The attribute to get a span for.
:param sep: The separator to use for the join,
or to be removed from text if a="words".
:return: The joined tokens, or text if a="words".
"""
# NOTE: Special behavior for words currently (due to correspondence
# with char_offsets)
if a == "words":
return self.sentence.text[self.char_start : self.char_end + 1].replace(
sep, ""
)
else:
return sep.join([str(n) for n in self.get_attrib_tokens(a)])
def get_span(self) -> str:
"""Return the text of the ``Span``.
:return: The text of the ``Span``.
"""
return self.get_attrib_span("words")
def get_bbox(self) -> Bbox:
"""Get the bounding box."""
if self.sentence.is_visual():
return Bbox(
self.get_attrib_tokens("page")[0],
min(self.get_attrib_tokens("top")),
max(self.get_attrib_tokens("bottom")),
min(self.get_attrib_tokens("left")),
max(self.get_attrib_tokens("right")),
)
else:
return None
def __contains__(self, other_span: object) -> bool:
"""Check if the mention contains another mention."""
if not isinstance(other_span, TemporarySpanMention):
return NotImplemented
return (
self.sentence == other_span.sentence
and other_span.char_start >= self.char_start
and other_span.char_end <= self.char_end
)
def __getitem__(self, key: slice) -> "TemporarySpanMention":
"""Slice operation returns a new candidate sliced according to **char index**.
Note that the slicing is w.r.t. the candidate range (not the abs.
sentence char indexing).
"""
if isinstance(key, slice):
char_start = (
self.char_start if key.start is None else self.char_start + key.start
)
if key.stop is None:
char_end = self.char_end
elif key.stop >= 0:
char_end = self.char_start + key.stop - 1
else:
char_end = self.char_end + key.stop
return self._get_instance(
char_start=char_start, char_end=char_end, sentence=self.sentence
)
else:
raise NotImplementedError()
def __repr__(self) -> str:
"""Represent the mention as a string."""
return (
f"{self.__class__.__name__}"
f"("
f'"{self.get_span()}", '
f"sentence={self.sentence.id}, "
f"chars=[{self.char_start},{self.char_end}], "
f"words=[{self.get_word_start_index()},{self.get_word_end_index()}]"
f")"
)
def _get_instance(self, **kwargs: Any) -> "TemporarySpanMention":
return TemporarySpanMention(**kwargs)
class SpanMention(Context, TemporarySpanMention):
"""
A span of chars, identified by Context ID and char-index start, end (inclusive).
char_offsets are **relative to the Context start**.
"""
__tablename__ = "span_mention"
#: The unique id of the ``SpanMention``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The id of the parent ``Sentence``.
sentence_id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"))
#: The parent ``Sentence``.
sentence = relationship(
"Context",
backref=backref("spans", cascade="all, delete-orphan"),
foreign_keys=sentence_id,
)
#: The starting character-index of the ``SpanMention``.
char_start = Column(Integer, nullable=False)
#: The ending character-index of the ``SpanMention`` (inclusive).
char_end = Column(Integer, nullable=False)
#: Pickled metadata about the ``SpanMention``.
meta = Column(PickleType)
__table_args__ = (UniqueConstraint(sentence_id, char_start, char_end),)
__mapper_args__ = {
"polymorphic_identity": "span_mention",
"inherit_condition": (id == Context.id),
}
def __init__(self, tc: TemporarySpanMention):
"""Initialize SpanMention."""
self.stable_id = tc.get_stable_id()
self.sentence = tc.sentence
self.char_start = tc.char_start
self.char_end = tc.char_end
self.meta = tc.meta
def _get_instance(self, **kwargs: Any) -> "SpanMention":
return SpanMention(**kwargs)
# We redefine these to use default semantics, overriding the operators
# inherited from TemporarySpanMention
def __eq__(self, other: object) -> bool:
"""Check if the mention is equal to another mention."""
if not isinstance(other, SpanMention):
return NotImplemented
return self is other
def __ne__(self, other: object) -> bool:
"""Check if the mention is not equal to another mention."""
if not isinstance(other, SpanMention):
return NotImplemented
return self is not other
def __hash__(self) -> int:
"""Get the hash value of mention."""
return id(self)
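
# A minimal sketch demonstrating the char-index semantics above on an
# already-parsed ``Sentence`` (assumed to be at least five characters long).
# The helper is illustrative only; slicing is relative to the span, and
# ``char_end`` is inclusive.
def _example_span_indexing(sentence: Sentence) -> None:
    span = TemporarySpanMention(sentence, char_start=0, char_end=4)
    assert len(span) == 5  # char_end is inclusive
    sub = span[1:3]  # characters 1..2 of the span, still a temporary mention
    assert (sub.char_start, sub.char_end) == (1, 2)
    # For a="words", get_attrib_span() slices the raw sentence text directly.
    print(span.get_span(), span.get_word_start_index(), span.get_word_end_index())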
| fonduer-master | src/fonduer/candidates/models/span_mention.py |
"""Fonduer candidate model."""
import logging
from typing import Any, Dict, List, Optional, Tuple, Type
from sqlalchemy import Column, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.orm import backref, relationship
from fonduer.candidates.models.mention import Mention
from fonduer.meta import Meta
from fonduer.utils.utils import camel_to_under
logger = logging.getLogger(__name__)
# This global dictionary contains all classes that have been declared in this
# Python environment, so that candidate_subclass() can return a class if it
# already exists and is identical in specification to the requested class
candidate_subclasses: Dict[str, Tuple] = {}
class Candidate(Meta.Base):
"""
An abstract candidate relation.
New relation types should be defined by calling candidate_subclass(),
**not** subclassing this class directly.
"""
__tablename__ = "candidate"
#: The unique id for the ``Candidate``.
id = Column(Integer, primary_key=True)
#: The type for the ``Candidate``, which corresponds to the names the user
#: gives to the candidate_subclasses.
type = Column(String, nullable=False)
#: Which split the ``Candidate`` belongs to. Used to organize train/dev/test.
split = Column(Integer, nullable=False, default=0, index=True)
__mapper_args__ = {"polymorphic_identity": "candidate", "polymorphic_on": type}
# __table_args__ = {"extend_existing" : True}
def get_mentions(self) -> Tuple[Mention, ...]:
"""Get a tuple of the constituent ``Mentions`` making up this ``Candidate``."""
return tuple(getattr(self, name) for name in self.__argnames__)
def __len__(self) -> int:
"""Get the length of the candidate."""
return len(self.__argnames__)
def __getitem__(self, key: int) -> Mention:
"""Get the mention from candidate."""
return self.get_mentions()[key]
def __repr__(self) -> str:
"""Represent the candidate as a string."""
return (
f"{self.__class__.__name__}"
f"("
f"{', '.join(map(str, self.get_mentions()))}"
f")"
)
def __gt__(self, other_cand: "Candidate") -> bool:
"""Check if the candidate is greater than another candidate."""
# Allow sorting by comparing the string representations of each
return self.__repr__() > other_cand.__repr__()
def candidate_subclass(
class_name: str,
args: List[Mention],
table_name: Optional[str] = None,
cardinality: Optional[int] = None,
values: Optional[List[Any]] = None,
nullables: Optional[List[bool]] = None,
) -> Type[Candidate]:
"""Create new relation.
Creates and returns a Candidate subclass with the provided argument names,
which are Context types. Creates the corresponding table in the DB if it
does not exist yet.
Import using:
.. code-block:: python
from fonduer.candidates.models import candidate_subclass
:param class_name: The name of the class, should be "camel case" e.g.
NewCandidate
:param args: A list of names of constituent arguments, which refer to the
Contexts--representing mentions--that comprise the candidate
:param table_name: The name of the corresponding table in DB; if not
provided, is converted from camel case by default, e.g. new_candidate
:param cardinality: The cardinality of the variable corresponding to the
Candidate. By default is 2 i.e. is a binary value, e.g. is or is not
a true mention.
:param values: A list of values a candidate can take as their label.
:param nullables: The number of nullables must match that of args.
If nullables[i]==True, a mention for the i-th mention subclass can be NULL.
If nullables=``None`` (by default), no mention can be NULL.
"""
if table_name is None:
table_name = camel_to_under(class_name)
# If cardinality and values are None, default to binary classification
if cardinality is None and values is None:
values = [True, False]
cardinality = 2
# Else use values if present, and validate proper input
elif values is not None:
if cardinality is not None and len(values) != cardinality:
raise ValueError("Number of values must match cardinality.")
if None in values:
raise ValueError("`None` is a protected value.")
# Note that bools are instances of ints in Python...
if any([isinstance(v, int) and not isinstance(v, bool) for v in values]):
raise ValueError(
(
"Default usage of values is consecutive integers."
"Leave values unset if trying to define values as integers."
)
)
cardinality = len(values)
# If cardinality is specified but not values, fill in with ints
elif cardinality is not None:
values = list(range(cardinality))
if nullables:
if len(nullables) != len(args):
raise ValueError("The number of nullables must match that of args.")
else:
nullables = [False] * len(args)
class_spec = (args, table_name, cardinality, values)
if class_name in candidate_subclasses:
if class_spec == candidate_subclasses[class_name][1]:
return candidate_subclasses[class_name][0]
else:
raise ValueError(
f"Candidate subclass {class_name} "
f"already exists in memory with incompatible "
f"specification: {candidate_subclasses[class_name][1]}"
)
else:
# Set the class attributes == the columns in the database
class_attribs = {
# Declares name for storage table
"__tablename__": table_name,
# Connects candidate_subclass records to generic Candidate records
"id": Column(
Integer,
ForeignKey("candidate.id", ondelete="CASCADE"),
primary_key=True,
),
# Store values & cardinality information in the class only
"values": values,
"cardinality": cardinality,
# Polymorphism information for SQLAlchemy
"__mapper_args__": {"polymorphic_identity": table_name},
# Helper method to get argument names
"__argnames__": [_.__tablename__ for _ in args],
"mentions": args,
"nullables": nullables,
}
class_attribs["document_id"] = Column(
Integer, ForeignKey("document.id", ondelete="CASCADE")
)
class_attribs["document"] = relationship(
"Document",
backref=backref(table_name + "s", cascade="all, delete-orphan"),
foreign_keys=class_attribs["document_id"],
)
# Create named arguments, i.e. the entity mentions comprising the
# relation mention.
unique_args = []
for arg, nullable in zip(args, nullables):
# Primary arguments are constituent Contexts, and their ids
class_attribs[arg.__tablename__ + "_id"] = Column(
Integer,
ForeignKey(arg.__tablename__ + ".id", ondelete="CASCADE"),
nullable=nullable,
)
class_attribs[arg.__tablename__] = relationship(
arg.__name__,
backref=backref(
table_name + "_" + arg.__tablename__ + "s",
cascade_backrefs=False,
cascade="all, delete-orphan",
),
cascade_backrefs=False,
foreign_keys=class_attribs[arg.__tablename__ + "_id"],
)
unique_args.append(class_attribs[arg.__tablename__ + "_id"])
# Add unique constraints to the arguments
class_attribs["__table_args__"] = (UniqueConstraint(*unique_args),)
# Create class
C = type(class_name, (Candidate,), class_attribs)
# Create table in DB
if Meta.engine and not Meta.engine.has_table(table_name):
C.__table__.create(bind=Meta.engine) # type: ignore
candidate_subclasses[class_name] = C, class_spec
# Make this dynamically created class picklable
# https://stackoverflow.com/a/39529149
globals()[class_name] = C
return C
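
# A minimal sketch highlighting the `nullables` argument validated above. The
# helper and the mention classes "Part" and "Temp" are hypothetical; with
# nullables=[False, True] a candidate may be stored with its second mention
# missing (NULL foreign key).
def _example_relation_with_nullable_arg() -> Type[Candidate]:
    from fonduer.candidates.models.mention import mention_subclass

    Part = mention_subclass("Part")
    Temp = mention_subclass("Temp")
    return candidate_subclass(
        "PartTempOptional", [Part, Temp], nullables=[False, True]
    )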
| fonduer-master | src/fonduer/candidates/models/candidate.py |
"""Fonduer section mention model."""
from typing import Any, Dict, Type
from sqlalchemy import Column, ForeignKey, Integer, UniqueConstraint
from sqlalchemy.orm import relationship
from fonduer.candidates.models.temporary_context import TemporaryContext
from fonduer.parser.models import Section
from fonduer.parser.models.context import Context
from fonduer.parser.models.utils import construct_stable_id
class TemporarySectionMention(TemporaryContext):
"""The TemporaryContext version of SectionMention."""
def __init__(self, section: Section) -> None:
"""Initialize TemporarySectionMention."""
super().__init__()
self.section = section # The section Context
def __len__(self) -> int:
"""Get the length of the mention."""
return 1
def __eq__(self, other: object) -> bool:
"""Check if the mention is equal to another mention."""
if not isinstance(other, TemporarySectionMention):
return NotImplemented
return self.section == other.section
def __ne__(self, other: object) -> bool:
"""Check if the mention is not equal to another mention."""
if not isinstance(other, TemporarySectionMention):
return NotImplemented
return self.section != other.section
def __gt__(self, other: object) -> bool:
"""Check if the mention is greater than another mention."""
if not isinstance(other, TemporarySectionMention):
return NotImplemented
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
def __contains__(self, other: object) -> bool:
"""Check if the mention contains another mention."""
if not isinstance(other, TemporarySectionMention):
return NotImplemented
return self.__eq__(other)
def __hash__(self) -> int:
"""Get the hash value of mention."""
return hash(self.section)
def get_stable_id(self) -> str:
"""Return a stable id."""
return construct_stable_id(self.section, self._get_polymorphic_identity(), 0, 0)
def _get_table(self) -> Type["SectionMention"]:
return SectionMention
def _get_polymorphic_identity(self) -> str:
return "section_mention"
def _get_insert_args(self) -> Dict[str, Any]:
return {"section_id": self.section.id}
def __repr__(self) -> str:
"""Represent the mention as a string."""
return (
f"{self.__class__.__name__}"
f"("
f"document={self.section.document.name}, "
f"position={self.section.position}"
f")"
)
def _get_instance(self, **kwargs: Any) -> "TemporarySectionMention":
return TemporarySectionMention(**kwargs)
class SectionMention(Context, TemporarySectionMention):
"""A section ``Mention``."""
__tablename__ = "section_mention"
#: The unique id of the ``SectionMention``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The id of the parent ``Section``.
section_id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"))
#: The parent ``Section``.
section = relationship("Context", foreign_keys=section_id)
__table_args__ = (UniqueConstraint(section_id),)
__mapper_args__ = {
"polymorphic_identity": "section_mention",
"inherit_condition": (id == Context.id),
}
def __init__(self, tc: TemporarySectionMention):
"""Initialize SectionMention."""
self.stable_id = tc.get_stable_id()
self.section = tc.section
| fonduer-master | src/fonduer/candidates/models/section_mention.py |
"""Fonduer mention model."""
import logging
from typing import Any, Dict, List, Optional, Tuple, Type
from sqlalchemy import Column, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.orm import backref, relationship
from fonduer.meta import Meta
from fonduer.parser.models import Context
from fonduer.utils.utils import camel_to_under
logger = logging.getLogger(__name__)
# This global dictionary contains all classes that have been declared in this
# Python environment, so that mention_subclass() can return a class if it
# already exists and is identical in specification to the requested class.
mention_subclasses: Dict[str, Tuple] = {}
class Mention(Meta.Base):
"""
An abstract Mention.
New mention types should be defined by calling mention_subclass(),
**not** subclassing this class directly.
"""
__tablename__ = "mention"
#: The unique id of the ``Mention``.
id = Column(Integer, primary_key=True)
#: The type for the ``Mention``, which corresponds to the names the user
#: gives to the mention_subclass.
type = Column(String, nullable=False)
__mapper_args__ = {"polymorphic_identity": "mention", "polymorphic_on": type}
def get_contexts(self) -> Tuple[Context, ...]:
"""Get the constituent context making up this mention."""
return tuple(getattr(self, name) for name in self.__argnames__)
def __len__(self) -> int:
"""Get the length of the mention."""
return len(self.__argnames__)
def __getitem__(self, key: int) -> Context:
"""Get the context from mention."""
return self.get_contexts()[key]
def __repr__(self) -> str:
"""Represent the mention as a string."""
return (
f"{self.__class__.__name__}"
f"("
f"{', '.join(map(str, self.get_contexts()))}"
f")"
)
def __gt__(self, other: "Mention") -> bool:
"""Check if the mention is greater than another mention."""
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
def mention_subclass(
class_name: str,
cardinality: Optional[int] = None,
values: Optional[List[Any]] = None,
table_name: Optional[str] = None,
) -> Type[Mention]:
"""Create new mention.
Creates and returns a Mention subclass whose single argument is of Context
type. Creates the corresponding table in the DB if it does not exist yet.
Import using:
.. code-block:: python
from fonduer.candidates.models import mention_subclass
:param class_name: The name of the class, should be "camel case" e.g.
NewMention
:param table_name: The name of the corresponding table in DB; if not
provided, is converted from camel case by default, e.g. new_mention
:param values: The values that the variable corresponding to the Mention
can take. By default it will be [True, False].
:param cardinality: The cardinality of the variable corresponding to the
Mention. By default is 2 i.e. is a binary value, e.g. is or is not
a true mention.
"""
if table_name is None:
table_name = camel_to_under(class_name)
# If cardinality and values are None, default to binary classification
if cardinality is None and values is None:
values = [True, False]
cardinality = 2
# Else use values if present, and validate proper input
elif values is not None:
if cardinality is not None and len(values) != cardinality:
raise ValueError("Number of values must match cardinality.")
if None in values:
raise ValueError("`None` is a protected value.")
# Note that bools are instances of ints in Python...
if any([isinstance(v, int) and not isinstance(v, bool) for v in values]):
raise ValueError(
(
"Default usage of values is consecutive integers."
"Leave values unset if trying to define values as integers."
)
)
cardinality = len(values)
# If cardinality is specified but not values, fill in with ints
elif cardinality is not None:
values = list(range(cardinality))
args = ["context"]
class_spec = (args, table_name, cardinality, values)
if class_name in mention_subclasses:
if class_spec == mention_subclasses[class_name][1]:
return mention_subclasses[class_name][0]
else:
raise ValueError(
f"Mention subclass {class_name} "
f"already exists in memory with incompatible "
f"specification: {mention_subclasses[class_name][1]}"
)
else:
# Set the class attributes == the columns in the database
class_attribs = {
# Declares name for storage table
"__tablename__": table_name,
# Connects mention_subclass records to generic Mention records
"id": Column(
Integer, ForeignKey("mention.id", ondelete="CASCADE"), primary_key=True
),
# Store values & cardinality information in the class only
"values": values,
"cardinality": cardinality,
# Polymorphism information for SQLAlchemy
"__mapper_args__": {"polymorphic_identity": table_name},
# Helper method to get argument names
"__argnames__": args,
}
class_attribs["document_id"] = Column(
Integer, ForeignKey("document.id", ondelete="CASCADE")
)
class_attribs["document"] = relationship(
"Document",
backref=backref(table_name + "s", cascade="all, delete-orphan"),
foreign_keys=class_attribs["document_id"],
)
# Create named arguments, i.e. the entity mentions comprising the
# relation mention.
unique_args = []
for arg in args:
# Primary arguments are constituent Contexts, and their ids
class_attribs[arg + "_id"] = Column(
Integer, ForeignKey("context.id", ondelete="CASCADE")
)
class_attribs[arg] = relationship(
"Context", foreign_keys=class_attribs[arg + "_id"]
)
unique_args.append(class_attribs[arg + "_id"])
# Add unique constraints to the arguments
class_attribs["__table_args__"] = (UniqueConstraint(*unique_args),)
# Create class
C = type(class_name, (Mention,), class_attribs)
# Create table in DB
if Meta.engine and not Meta.engine.has_table(table_name):
C.__table__.create(bind=Meta.engine) # type: ignore
mention_subclasses[class_name] = C, class_spec
# Make this dynamically created class picklable
# https://stackoverflow.com/a/39529149
globals()[class_name] = C
return C
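
# A minimal sketch of the memoization behavior above: re-declaring a mention
# subclass with an identical specification returns the cached class, while an
# incompatible re-declaration raises ValueError. The helper and the class
# name "Org" are hypothetical.
def _example_mention_subclass_memoization() -> None:
    Org = mention_subclass("Org")
    assert mention_subclass("Org") is Org  # identical spec returns cached class
    try:
        mention_subclass("Org", cardinality=3)  # incompatible spec
    except ValueError as err:
        print(err)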
| fonduer-master | src/fonduer/candidates/models/mention.py |
"""Fonduer document mention model."""
from typing import Any, Dict, Type
from sqlalchemy import Column, ForeignKey, Integer, UniqueConstraint
from sqlalchemy.orm import relationship
from fonduer.candidates.models.temporary_context import TemporaryContext
from fonduer.parser.models import Document
from fonduer.parser.models.context import Context
from fonduer.parser.models.utils import construct_stable_id
class TemporaryDocumentMention(TemporaryContext):
"""The TemporaryContext version of DocumentMention."""
def __init__(self, document: Document) -> None:
"""Initialize TemporaryDocumentMention."""
super().__init__()
self.document = document # The document Context
def __len__(self) -> int:
"""Get the length of the mention."""
return 1
def __eq__(self, other: object) -> bool:
"""Check if the mention is equal to another mention."""
if not isinstance(other, TemporaryDocumentMention):
return NotImplemented
return self.document == other.document
def __ne__(self, other: object) -> bool:
"""Check if the mention is not equal to another mention."""
if not isinstance(other, TemporaryDocumentMention):
return NotImplemented
return self.document != other.document
def __gt__(self, other: object) -> bool:
"""Check if the mention is greater than another mention."""
if not isinstance(other, TemporaryDocumentMention):
return NotImplemented
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
def __contains__(self, other: object) -> bool:
"""Check if the mention contains another mention."""
if not isinstance(other, TemporaryDocumentMention):
return NotImplemented
return self.__eq__(other)
def __hash__(self) -> int:
"""Get the hash value of mention."""
return hash(self.document)
def get_stable_id(self) -> str:
"""Return a stable id."""
return construct_stable_id(
self.document, self._get_polymorphic_identity(), 0, 0
)
def _get_table(self) -> Type["DocumentMention"]:
return DocumentMention
def _get_polymorphic_identity(self) -> str:
return "document_mention"
def _get_insert_args(self) -> Dict[str, Any]:
return {"document_id": self.document.id}
def __repr__(self) -> str:
"""Represent the mention as a string."""
return f"{self.__class__.__name__}(document={self.document.name})"
def _get_instance(self, **kwargs: Any) -> "TemporaryDocumentMention":
return TemporaryDocumentMention(**kwargs)
class DocumentMention(Context, TemporaryDocumentMention):
"""A document ``Mention``."""
__tablename__ = "document_mention"
#: The unique id of the ``DocumentMention``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The id of the parent ``Document``.
document_id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"))
#: The parent ``Document``.
document = relationship("Context", foreign_keys=document_id)
__table_args__ = (UniqueConstraint(document_id),)
__mapper_args__ = {
"polymorphic_identity": "document_mention",
"inherit_condition": (id == Context.id),
}
def __init__(self, tc: TemporaryDocumentMention):
"""Initialize DocumentMention."""
self.stable_id = tc.get_stable_id()
self.document = tc.document
| fonduer-master | src/fonduer/candidates/models/document_mention.py |
"""Fonduer temporary mention model."""
from builtins import object
from typing import Any, Dict, Type
from fonduer.parser.models.context import Context
class TemporaryContext(object):
"""Temporary Context class.
A context which does not incur the overhead of a proper ORM-based Context
object. The TemporaryContext class is specifically for the candidate
extraction process, during which a MentionSpace object will generate many
TemporaryContexts, which will then be filtered by Matchers prior to
materialization of Mentions and constituent Context objects.
Every Context object has a corresponding TemporaryContext object from which
it inherits.
A TemporaryContext must have specified equality / set membership semantics,
a stable_id for checking uniqueness against the database, and a promote()
method which returns a corresponding Context object.
"""
def __init__(self) -> None:
"""Initialize TemporaryContext."""
self.id = None
def __repr__(self) -> str:
"""Represent the mention as a string."""
raise NotImplementedError()
def __eq__(self, other: object) -> bool:
"""Check if the mention is equal to another mention."""
if not isinstance(other, TemporaryContext):
return NotImplemented
raise NotImplementedError()
def __ne__(self, other: object) -> bool:
"""Check if the mention is not equal to another mention."""
if not isinstance(other, TemporaryContext):
return NotImplemented
raise NotImplementedError()
def __gt__(self, other: object) -> bool:
"""Check if the mention is greater than another mention."""
if not isinstance(other, TemporaryContext):
return NotImplemented
raise NotImplementedError()
def __contains__(self, other: object) -> bool:
"""Check if the mention contains another mention."""
if not isinstance(other, TemporaryContext):
return NotImplemented
raise NotImplementedError()
def __hash__(self) -> int:
"""Get the hash value of mention."""
raise NotImplementedError()
def _get_polymorphic_identity(self) -> str:
raise NotImplementedError()
def _get_table(self) -> Type[Context]:
raise NotImplementedError()
def _get_insert_args(self) -> Dict[str, Any]:
raise NotImplementedError()
def get_stable_id(self) -> str:
"""Get the stable_id of TemporaryContext."""
raise NotImplementedError()
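
# A minimal sketch of the part of the contract above that a concrete
# temporary context must provide: equality, hashing, and a stable id. The
# class below is illustrative only; the persistence hooks (_get_table,
# _get_insert_args, _get_polymorphic_identity) are omitted here and are
# implemented by the real subclasses such as TemporaryDocumentMention.
class _ExampleTemporaryContext(TemporaryContext):
    def __init__(self, value: str) -> None:
        super().__init__()
        self.value = value

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, _ExampleTemporaryContext):
            return NotImplemented
        return self.value == other.value

    def __hash__(self) -> int:
        return hash(self.value)

    def get_stable_id(self) -> str:
        return f"example::{self.value}"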
| fonduer-master | src/fonduer/candidates/models/temporary_context.py |
"""Fonduer cell mention model."""
from typing import Any, Dict, Type
from sqlalchemy import Column, ForeignKey, Integer, UniqueConstraint
from sqlalchemy.orm import relationship
from fonduer.candidates.models.temporary_context import TemporaryContext
from fonduer.parser.models import Cell
from fonduer.parser.models.context import Context
from fonduer.parser.models.utils import construct_stable_id
class TemporaryCellMention(TemporaryContext):
"""The TemporaryContext version of CellMention."""
def __init__(self, cell: Cell) -> None:
"""Initialize TemporaryCellMention."""
super().__init__()
self.cell = cell # The cell Context
def __len__(self) -> int:
"""Get the length of the mention."""
return 1
def __eq__(self, other: object) -> bool:
"""Check if the mention is equal to another mention."""
if not isinstance(other, TemporaryCellMention):
return NotImplemented
return self.cell == other.cell
def __ne__(self, other: object) -> bool:
"""Check if the mention is not equal to another mention."""
if not isinstance(other, TemporaryCellMention):
return NotImplemented
return self.cell != other.cell
def __gt__(self, other: object) -> bool:
"""Check if the mention is greater than another mention."""
if not isinstance(other, TemporaryCellMention):
return NotImplemented
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
def __contains__(self, other: object) -> bool:
"""Check if the mention contains another mention."""
if not isinstance(other, TemporaryCellMention):
return NotImplemented
return self.__eq__(other)
def __hash__(self) -> int:
"""Get the hash value of mention."""
return hash(self.cell)
def get_stable_id(self) -> str:
"""Return a stable id."""
return construct_stable_id(self.cell, self._get_polymorphic_identity(), 0, 0)
def _get_table(self) -> Type["CellMention"]:
return CellMention
def _get_polymorphic_identity(self) -> str:
return "cell_mention"
def _get_insert_args(self) -> Dict[str, Any]:
return {"cell_id": self.cell.id}
def __repr__(self) -> str:
"""Represent the mention as a string."""
return (
f"{self.__class__.__name__}"
f"("
f"document={self.cell.document.name}, "
f"table_position={self.cell.table.position}, "
f"position={self.cell.position}"
f")"
)
def _get_instance(self, **kwargs: Any) -> "TemporaryCellMention":
return TemporaryCellMention(**kwargs)
class CellMention(Context, TemporaryCellMention):
"""A cell ``Mention``."""
__tablename__ = "cell_mention"
#: The unique id of the ``CellMention``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The id of the parent ``Cell``.
cell_id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"))
#: The parent ``Cell``.
cell = relationship("Context", foreign_keys=cell_id)
__table_args__ = (UniqueConstraint(cell_id),)
__mapper_args__ = {
"polymorphic_identity": "cell_mention",
"inherit_condition": (id == Context.id),
}
def __init__(self, tc: TemporaryCellMention):
"""Initialize CellMention."""
self.stable_id = tc.get_stable_id()
self.cell = tc.cell
| fonduer-master | src/fonduer/candidates/models/cell_mention.py |
"""Customized MLflow model for Fonduer."""
import logging
import os
import sys
from io import BytesIO
from typing import Any, Callable, Dict, List, Optional, Union
import cloudpickle as pickle
import emmental
import numpy as np
import torch
import yaml
from emmental.model import EmmentalModel
from mlflow import pyfunc
from mlflow.models import Model
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.file_utils import _copy_file_or_tree
from mlflow.utils.model_utils import _get_flavor_configuration
from pandas import DataFrame
from scipy.sparse import csr_matrix
from snorkel.labeling.model import LabelModel
from fonduer import init_logging
from fonduer.candidates import CandidateExtractor, MentionExtractor
from fonduer.candidates.candidates import CandidateExtractorUDF
from fonduer.candidates.mentions import MentionExtractorUDF
from fonduer.candidates.models import (
Candidate,
Mention,
candidate_subclass,
mention_subclass,
)
from fonduer.candidates.models.mention import mention_subclasses
from fonduer.features.feature_extractors import FeatureExtractor
from fonduer.features.featurizer import Featurizer, FeaturizerUDF
from fonduer.parser import Parser
from fonduer.parser.models import Document
from fonduer.parser.parser import ParserUDF
from fonduer.parser.preprocessors import DocPreprocessor
from fonduer.supervision.labeler import Labeler, LabelerUDF
from fonduer.utils.utils_udf import _convert_mappings_to_matrix, unshift_label_matrix
logger = logging.getLogger(__name__)
MODEL_TYPE = "model_type"
class FonduerModel(pyfunc.PythonModel):
"""A custom MLflow model for Fonduer.
This class is intended to be subclassed.
"""
def _classify(self, doc: Document) -> DataFrame:
"""Classify candidates by an Emmental model (or by a label model)."""
raise NotImplementedError()
def predict(self, model_input: DataFrame) -> DataFrame:
"""Take html_path (and pdf_path) as input and return extracted information.
This method is required and its signature is defined by MLflow's convention.
See MLflow_ for more details.
.. _MLflow:
https://www.mlflow.org/docs/latest/models.html#python-function-python-function
:param model_input: Pandas DataFrame with rows as docs and columns as params.
params should include "html_path" and can optionally include "pdf_path".
:return: Pandas DataFrame containing the output from :func:`_classify`, which
depends on how it is implemented by a subclass.
"""
df = DataFrame()
for index, row in model_input.iterrows():
output = self._process(
row["html_path"], row["pdf_path"] if "pdf_path" in row.keys() else None
)
output["html_path"] = row["html_path"]
df = df.append(output)
return df
def _process(self, html_path: str, pdf_path: Optional[str] = None) -> DataFrame:
"""Run the whole pipeline of Fonduer.
:param html_path: a path of an HTML file or a directory containing files.
:param pdf_path: a path of a PDF file or a directory containing files.
"""
if not os.path.exists(html_path):
raise ValueError("html_path should be a file/directory path")
# Parse docs
doc = next(
self.preprocessor._parse_file(html_path, os.path.basename(html_path))
)
logger.info(f"Parsing {html_path}")
doc = self.parser.apply(doc, pdf_path=pdf_path)
logger.info(f"Extracting mentions from {html_path}")
doc = self.mention_extractor.apply(doc)
logger.info(f"Extracting candidates from {html_path}")
doc = self.candidate_extractor.apply(doc, split=2)
logger.info(f"Classifying candidates from {html_path}")
df = self._classify(doc)
return df
@staticmethod
def convert_features_to_matrix(
features: List[Dict[str, Any]], keys: List[str]
) -> csr_matrix:
"""Convert features (the output from FeaturizerUDF.apply) into a sparse matrix.
:param features: a list of feature mapping (key: key, value=feature).
:param keys: a list of all keys.
"""
return _convert_mappings_to_matrix(features, keys)
@staticmethod
def convert_labels_to_matrix(
labels: List[Dict[str, Any]], keys: List[str]
) -> np.ndarray:
"""Convert labels (the output from LabelerUDF.apply) into a dense matrix.
Note that the input labels are 0-indexed (``{0, 1, ..., k}``),
while the output labels are -1-indexed (``{-1, 0, ..., k-1}``).
:param labels: a list of label mapping (key: key, value=label).
:param keys: a list of all keys.
"""
return unshift_label_matrix(_convert_mappings_to_matrix(labels, keys))
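
# A minimal sketch of building the one-row input DataFrame that ``predict``
# expects. The helper and the html/pdf paths are hypothetical placeholders,
# and ``model`` would normally come from ``mlflow.pyfunc.load_model(...)``
# rather than being constructed directly.
def _example_predict(model: FonduerModel) -> DataFrame:
    model_input = DataFrame(
        data={
            "html_path": ["data/html/sample.html"],  # hypothetical path
            "pdf_path": ["data/pdf/sample.pdf"],  # optional column
        }
    )
    return model.predict(model_input)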
def _load_pyfunc(model_path: str) -> Any:
"""Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``."""
# Load mention_classes
_load_mention_classes(model_path)
# Load candiate_classes
_load_candidate_classes(model_path)
# Load a pickled model
model = pickle.load(open(os.path.join(model_path, "model.pkl"), "rb"))
fonduer_model = model["fonduer_model"]
fonduer_model.preprocessor = model["preprosessor"]
fonduer_model.parser = ParserUDF(**model["parser"])
fonduer_model.mention_extractor = MentionExtractorUDF(**model["mention_extractor"])
fonduer_model.candidate_extractor = CandidateExtractorUDF(
**model["candidate_extractor"]
)
# Configure logging for Fonduer
init_logging(log_dir="logs")
pyfunc_conf = _get_flavor_configuration(
model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME
)
candidate_classes = fonduer_model.candidate_extractor.candidate_classes
fonduer_model.model_type = pyfunc_conf.get(MODEL_TYPE, "emmental")
if fonduer_model.model_type == "emmental":
emmental.init()
fonduer_model.featurizer = FeaturizerUDF(candidate_classes, FeatureExtractor())
fonduer_model.key_names = model["feature_keys"]
fonduer_model.word2id = model["word2id"]
fonduer_model.emmental_model = _load_emmental_model(model["emmental_model"])
else:
fonduer_model.labeler = LabelerUDF(candidate_classes)
fonduer_model.key_names = model["labeler_keys"]
fonduer_model.lfs = model["lfs"]
fonduer_model.label_models = []
for state_dict in model["label_models_state_dict"]:
label_model = LabelModel()
label_model.__dict__.update(state_dict)
fonduer_model.label_models.append(label_model)
return fonduer_model
def log_model(
fonduer_model: FonduerModel,
artifact_path: str,
preprocessor: DocPreprocessor,
parser: Parser,
mention_extractor: MentionExtractor,
candidate_extractor: CandidateExtractor,
conda_env: Optional[Union[Dict, str]] = None,
code_paths: Optional[List[str]] = None,
model_type: Optional[str] = "emmental",
labeler: Optional[Labeler] = None,
lfs: Optional[List[List[Callable]]] = None,
label_models: Optional[List[LabelModel]] = None,
featurizer: Optional[Featurizer] = None,
emmental_model: Optional[EmmentalModel] = None,
word2id: Optional[Dict] = None,
) -> None:
"""Log a Fonduer model as an MLflow artifact for the current run.
:param fonduer_model: Fonduer model to be saved.
:param artifact_path: Run-relative artifact path.
:param preprocessor: the doc preprocessor.
    :param parser: the parser.
    :param mention_extractor: the mention extractor.
    :param candidate_extractor: the candidate extractor.
:param conda_env: Either a dictionary representation of a Conda environment
or the path to a Conda environment yaml file.
:param code_paths: A list of local filesystem paths to Python file dependencies,
or directories containing file dependencies. These files are prepended to the
system path when the model is loaded.
:param model_type: the model type, either "emmental" or "label",
defaults to "emmental".
:param labeler: a labeler, defaults to None.
    :param lfs: a list of lists of labeling functions, defaults to None.
:param label_models: a list of label models, defaults to None.
:param featurizer: a featurizer, defaults to None.
:param emmental_model: an Emmental model, defaults to None.
:param word2id: a word embedding map.
"""
Model.log(
artifact_path=artifact_path,
flavor=sys.modules[__name__],
fonduer_model=fonduer_model,
preprocessor=preprocessor,
parser=parser,
mention_extractor=mention_extractor,
candidate_extractor=candidate_extractor,
conda_env=conda_env,
code_paths=code_paths,
model_type=model_type,
labeler=labeler,
lfs=lfs,
label_models=label_models,
featurizer=featurizer,
emmental_model=emmental_model,
word2id=word2id,
)
def save_model(
fonduer_model: FonduerModel,
path: str,
preprocessor: DocPreprocessor,
parser: Parser,
mention_extractor: MentionExtractor,
candidate_extractor: CandidateExtractor,
mlflow_model: Model = Model(),
conda_env: Optional[Union[Dict, str]] = None,
code_paths: Optional[List[str]] = None,
model_type: Optional[str] = "emmental",
labeler: Optional[Labeler] = None,
lfs: Optional[List[List[Callable]]] = None,
label_models: Optional[List[LabelModel]] = None,
featurizer: Optional[Featurizer] = None,
emmental_model: Optional[EmmentalModel] = None,
word2id: Optional[Dict] = None,
) -> None:
"""Save a Fonduer model to a path on the local file system.
:param fonduer_model: Fonduer model to be saved.
:param path: the path on the local file system.
:param preprocessor: the doc preprocessor.
    :param parser: the parser.
    :param mention_extractor: the mention extractor.
    :param candidate_extractor: the candidate extractor.
:param mlflow_model: model configuration.
:param conda_env: Either a dictionary representation of a Conda environment
or the path to a Conda environment yaml file.
:param code_paths: A list of local filesystem paths to Python file dependencies,
or directories containing file dependencies. These files are prepended to the
system path when the model is loaded.
:param model_type: the model type, either "emmental" or "label",
defaults to "emmental".
:param labeler: a labeler, defaults to None.
    :param lfs: a list of lists of labeling functions, defaults to None.
:param label_models: a list of label models, defaults to None.
:param featurizer: a featurizer, defaults to None.
:param emmental_model: an Emmental model, defaults to None.
:param word2id: a word embedding map.
"""
os.makedirs(path)
model_code_path = os.path.join(path, pyfunc.CODE)
os.makedirs(model_code_path)
# Save mention_classes and candidate_classes
_save_mention_classes(mention_extractor.udf_init_kwargs["mention_classes"], path)
_save_candidate_classes(
candidate_extractor.udf_init_kwargs["candidate_classes"], path
)
    # lfs defined in a user module (e.g., fonduer_lfs.py) cannot be unpickled without
    # that module, so temporarily rebind their __module__ to "__main__" before pickling.
# https://github.com/cloudpipe/cloudpickle/issues/206#issuecomment-555939172
modules = []
if model_type == "label":
for _ in lfs:
for lf in _:
modules.append(lf.__module__)
lf.__module__ = "__main__"
    # Note that instances of ParserUDF and other UDFs themselves are not picklable.
# https://stackoverflow.com/a/52026025
model = {
"fonduer_model": fonduer_model,
"preprosessor": preprocessor,
"parser": parser.udf_init_kwargs,
"mention_extractor": mention_extractor.udf_init_kwargs,
"candidate_extractor": candidate_extractor.udf_init_kwargs,
}
if model_type == "emmental":
key_names = [key.name for key in featurizer.get_keys()]
model["feature_keys"] = key_names
model["word2id"] = word2id
model["emmental_model"] = _save_emmental_model(emmental_model)
else:
key_names = [key.name for key in labeler.get_keys()]
model["labeler_keys"] = key_names
model["lfs"] = lfs
model["label_models_state_dict"] = [
label_model.__dict__ for label_model in label_models
]
pickle.dump(model, open(os.path.join(path, "model.pkl"), "wb"))
# Restore __module__ back to the original
if model_type == "label":
for _ in lfs:
for lf in _:
lf.__module__ = modules.pop()
# Create a conda yaml file.
conda_env_subpath = "conda.yaml"
if conda_env is None:
conda_env = _get_default_conda_env()
elif not isinstance(conda_env, dict):
with open(conda_env, "r") as f:
conda_env = yaml.safe_load(f)
with open(os.path.join(path, conda_env_subpath), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
# Copy code_paths.
if code_paths is not None:
for code_path in code_paths:
_copy_file_or_tree(src=code_path, dst=model_code_path)
mlflow_model.add_flavor(
pyfunc.FLAVOR_NAME,
code=pyfunc.CODE,
loader_module=__name__,
model_type=model_type,
env=conda_env_subpath,
)
mlflow_model.save(os.path.join(path, "MLmodel"))
def _get_default_conda_env() -> Optional[Dict[str, Any]]:
"""Get default Conda environment.
:return: The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
"""
import torch
import fonduer
return _mlflow_conda_env(
additional_conda_deps=[
"pytorch={}".format(torch.__version__),
"psycopg2",
"pip",
],
additional_pip_deps=["fonduer=={}".format(fonduer.__version__)],
additional_conda_channels=["pytorch"],
)
def _save_emmental_model(emmental_model: EmmentalModel) -> bytes:
buffer = BytesIO()
torch.save(emmental_model, buffer)
buffer.seek(0)
return buffer.read()
def _load_emmental_model(b: bytes) -> EmmentalModel:
buffer = BytesIO()
buffer.write(b)
buffer.seek(0)
return torch.load(buffer)
def _save_mention_classes(mention_classes: List[Mention], path: str) -> None:
pickle.dump(
[
{
"class_name": mention_class.__name__,
"cardinality": mention_class.cardinality,
"values": mention_class.values,
"table_name": mention_class.__tablename__,
}
for mention_class in mention_classes
],
open(os.path.join(path, "mention_classes.pkl"), "wb"),
)
def _load_mention_classes(path: str) -> None:
for kwargs in pickle.load(open(os.path.join(path, "mention_classes.pkl"), "rb")):
mention_subclass(**kwargs)
def _save_candidate_classes(candidate_classes: List[Candidate], path: str) -> None:
pickle.dump(
[
{
"class_name": candidate_class.__name__,
"mention_class_names": [
candidate_class.__name__
for candidate_class in candidate_class.mentions
],
"table_name": candidate_class.__tablename__,
"cardinality": candidate_class.cardinality,
"values": candidate_class.values,
}
for candidate_class in candidate_classes
],
open(os.path.join(path, "candidate_classes.pkl"), "wb"),
)
def _load_candidate_classes(path: str) -> None:
for kwargs in pickle.load(open(os.path.join(path, "candidate_classes.pkl"), "rb")):
# Convert the classnames of mention to mention_classes
kwargs["args"] = [
mention_subclasses[mention_class_name][0]
for mention_class_name in kwargs.pop("mention_class_names")
]
candidate_subclass(**kwargs)
| fonduer-master | src/fonduer/packaging/fonduer_model.py |
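A minimal sketch of how the flavor above might be used to package a trained pipeline with MLflow. Everything prefixed with `my_` and the `_classify` body are assumptions standing in for objects built earlier in a real pipeline; they are not defined in the module above.

from pandas import DataFrame

from fonduer.packaging import FonduerModel, save_model


class MyFonduerModel(FonduerModel):
    """Assumed subclass that turns a parsed Document into a DataFrame of relations."""

    def _classify(self, doc) -> DataFrame:
        # Application-specific extraction logic would go here.
        return DataFrame()


# All `my_*` objects are assumed to come from a previously run Fonduer pipeline.
save_model(
    MyFonduerModel(),
    path="fonduer_model",
    preprocessor=my_preprocessor,
    parser=my_parser,
    mention_extractor=my_mention_extractor,
    candidate_extractor=my_candidate_extractor,
    model_type="emmental",
    featurizer=my_featurizer,
    emmental_model=my_emmental_model,
    word2id=my_word2id,
)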
"""Fonduer's packaging module."""
from fonduer.packaging.fonduer_model import FonduerModel, log_model, save_model
__all__ = [
"FonduerModel",
"save_model",
"log_model",
]
| fonduer-master | src/fonduer/packaging/__init__.py |
"""Customized Emmental task for Fonduer."""
import logging
from functools import partial
from typing import Any, Dict, List, Optional, Union
from emmental.modules.embedding_module import EmbeddingModule
from emmental.modules.rnn_module import RNN
from emmental.modules.sparse_linear_module import SparseLinear
from emmental.scorer import Scorer
from emmental.task import EmmentalTask
from torch import Tensor, nn as nn
from torch.nn import functional as F
from fonduer.learning.modules.concat_linear import ConcatLinear
from fonduer.learning.modules.soft_cross_entropy_loss import SoftCrossEntropyLoss
from fonduer.utils.config import get_config
logger = logging.getLogger(__name__)
sce_loss = SoftCrossEntropyLoss()
def loss(
module_name: str,
intermediate_output_dict: Dict[str, Any],
Y: Tensor,
active: Tensor,
) -> Tensor:
"""Define the loss of the task.
:param module_name: The module name to calculate the loss.
:param intermediate_output_dict: The intermediate output dictionary
:param Y: Ground truth labels.
:param active: The sample mask.
:return: Loss.
"""
if len(Y.size()) == 1:
label = intermediate_output_dict[module_name][0].new_zeros(
intermediate_output_dict[module_name][0].size()
)
label.scatter_(1, Y.view(Y.size()[0], 1), 1.0)
else:
label = Y
return sce_loss(intermediate_output_dict[module_name][0][active], label[active])
def output(module_name: str, intermediate_output_dict: Dict[str, Any]) -> Tensor:
"""Define the output of the task.
:param module_name: The module name to calculate the loss.
:param intermediate_output_dict: The intermediate output dictionary
:return: Output tensor.
"""
    return F.softmax(intermediate_output_dict[module_name][0], dim=1)
def create_task(
task_names: Union[str, List[str]],
n_arities: Union[int, List[int]],
n_features: int,
n_classes: Union[int, List[int]],
emb_layer: Optional[EmbeddingModule],
model: str = "LSTM",
mode: str = "MTL",
) -> List[EmmentalTask]:
"""Create task from relation(s).
    :param task_names: Relation name(s). If str, a single relation; if List[str],
        multiple relations.
:param n_arities: The arity of each relation.
:param n_features: The multimodal feature set size.
    :param n_classes: Number of classes for each task. (Only classification tasks
        are supported for now.)
    :param emb_layer: The embedding layer for the LSTM model. Not needed for the
        LogisticRegression model.
:param model: Model name (available models: "LSTM", "LogisticRegression"),
defaults to "LSTM".
:param mode: Learning mode (available modes: "STL", "MTL"),
defaults to "MTL".
"""
if model not in ["LSTM", "LogisticRegression"]:
raise ValueError(
f"Unrecognized model {model}. Only support {['LSTM', 'LogisticRegression']}"
)
if mode not in ["STL", "MTL"]:
raise ValueError(f"Unrecognized mode {mode}. Only support {['STL', 'MTL']}")
config = get_config()["learning"][model]
logger.info(f"{model} model config: {config}")
if not isinstance(task_names, list):
task_names = [task_names]
if not isinstance(n_arities, list):
n_arities = [n_arities]
if not isinstance(n_classes, list):
n_classes = [n_classes]
tasks = []
for task_name, n_arity, n_class in zip(task_names, n_arities, n_classes):
if mode == "MTL":
feature_module_name = "shared_feature"
else:
feature_module_name = f"{task_name}_feature"
if model == "LSTM":
module_pool = nn.ModuleDict(
{
"emb": emb_layer,
feature_module_name: SparseLinear(
n_features + 1, config["hidden_dim"], bias=config["bias"]
),
}
)
for i in range(n_arity):
module_pool.update(
{
f"{task_name}_lstm{i}": RNN(
num_classes=0,
emb_size=emb_layer.dim,
lstm_hidden=config["hidden_dim"],
attention=config["attention"],
dropout=config["dropout"],
bidirectional=config["bidirectional"],
)
}
)
module_pool.update(
{
f"{task_name}_pred_head": ConcatLinear(
[f"{task_name}_lstm{i}" for i in range(n_arity)]
+ [feature_module_name],
config["hidden_dim"] * (2 * n_arity + 1)
if config["bidirectional"]
else config["hidden_dim"] * (n_arity + 1),
n_class,
)
}
)
task_flow = []
task_flow += [
{
"name": f"{task_name}_emb{i}",
"module": "emb",
"inputs": [("_input_", f"m{i}")],
}
for i in range(n_arity)
]
task_flow += [
{
"name": f"{task_name}_lstm{i}",
"module": f"{task_name}_lstm{i}",
"inputs": [(f"{task_name}_emb{i}", 0), ("_input_", f"m{i}_mask")],
}
for i in range(n_arity)
]
task_flow += [
{
"name": feature_module_name,
"module": feature_module_name,
"inputs": [
("_input_", "feature_index"),
("_input_", "feature_weight"),
],
}
]
task_flow += [
{
"name": f"{task_name}_pred_head",
"module": f"{task_name}_pred_head",
"inputs": None,
}
]
elif model == "LogisticRegression":
module_pool = nn.ModuleDict(
{
feature_module_name: SparseLinear(
n_features + 1, config["hidden_dim"], bias=config["bias"]
),
f"{task_name}_pred_head": ConcatLinear(
[feature_module_name], config["hidden_dim"], n_class
),
}
)
task_flow = [
{
"name": feature_module_name,
"module": feature_module_name,
"inputs": [
("_input_", "feature_index"),
("_input_", "feature_weight"),
],
},
{
"name": f"{task_name}_pred_head",
"module": f"{task_name}_pred_head",
"inputs": None,
},
]
else:
raise ValueError(f"Unrecognized model {model}.")
tasks.append(
EmmentalTask(
name=task_name,
module_pool=module_pool,
task_flow=task_flow,
loss_func=partial(loss, f"{task_name}_pred_head"),
output_func=partial(output, f"{task_name}_pred_head"),
scorer=Scorer(metrics=["accuracy", "precision", "recall", "f1"]),
)
)
return tasks
| fonduer-master | src/fonduer/learning/task.py |
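A short sketch of calling create_task above for a single binary relation using the LogisticRegression head, which needs no embedding layer. The relation name and the F_train feature matrix are assumptions borrowed from a typical pipeline.

from fonduer.learning.task import create_task

# Assumed: F_train is the sparse feature matrix returned by the Featurizer for
# this relation, so F_train.shape[1] is the multimodal feature set size.
tasks = create_task(
    task_names="part_temp",
    n_arities=2,
    n_features=F_train.shape[1],
    n_classes=2,
    emb_layer=None,
    model="LogisticRegression",
    mode="MTL",
)
# The returned EmmentalTask list is then typically registered with an Emmental
# model and trained on a FonduerDataset.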
"""Fonduer's learning module."""
| fonduer-master | src/fonduer/learning/__init__.py |
"""Fonduer dataset."""
import logging
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from emmental.data import EmmentalDataset
from scipy.sparse import csr_matrix
from torch import Tensor
from fonduer.candidates.models import Candidate
from fonduer.learning.utils import mark_sentence, mention_to_tokens
logger = logging.getLogger(__name__)
class FonduerDataset(EmmentalDataset):
"""A FonduerDataset class which is inherited from EmmentalDataset.
    This class takes a list of candidates and the corresponding feature matrix as
    input and wraps them.
:param name: The name of the dataset.
:param candidates: The list of candidates.
:param features: The corresponding feature matrix.
    :param word2id: The dictionary that maps each word to its embedding id.
    :param labels: If an np.array, the labels for all candidates; if an int, the
        number of label classes, in which case placeholder labels are created
        (mainly used for inference).
    :param index: Which candidates to use. If None, use all candidates.
"""
def __init__(
self,
name: str,
candidates: List[Candidate],
features: csr_matrix,
word2id: Dict,
labels: Union[np.array, int],
index: Optional[List[int]] = None,
):
"""Initialize FonduerDataset."""
self.name = name
self.candidates = candidates
self.features = features
self.word2id = word2id
self.labels = labels
self.index = index
self.X_dict: Dict[str, List[Any]] = {}
self.Y_dict: Dict[str, Tensor] = {}
self._map_to_id()
self._map_features()
self._map_labels()
uids = [f"{self.name}_{idx}" for idx in range(len(self.candidates))]
self.add_features({"_uids_": uids})
super().__init__(name, self.X_dict, self.Y_dict, "_uids_")
def __len__(self) -> int:
"""Get the length of the dataset."""
try:
if self.index is not None:
return len(self.index)
else:
return len(next(iter(self.X_dict.values())))
except StopIteration:
return 0
def __getitem__(
self, index: int
) -> Tuple[Dict[str, Union[Tensor, list]], Dict[str, Tensor]]:
"""Get the data from dataset."""
if self.index is not None:
index = self.index[index]
x_dict = {name: feature[index] for name, feature in self.X_dict.items()}
y_dict = {name: label[index] for name, label in self.Y_dict.items()}
return x_dict, y_dict
def _map_to_id(self) -> None:
self.X_dict.update(
dict([(f"m{i}", []) for i in range(len(self.candidates[0]))])
)
for candidate in self.candidates:
for i in range(len(candidate)):
# Add mark for each mention in the original sentence
args = [
(
candidate[i].context.get_word_start_index(),
candidate[i].context.get_word_end_index(),
i,
)
]
s = mark_sentence(mention_to_tokens(candidate[i]), args)
self.X_dict[f"m{i}"].append(
torch.tensor(
[
self.word2id[w]
if w in self.word2id
else self.word2id["<unk>"]
for w in s
],
dtype=torch.long,
)
)
def _map_features(self) -> None:
self.X_dict.update({"feature_index": [], "feature_weight": []})
for i in range(len(self.candidates)):
self.X_dict["feature_index"].append(
torch.tensor(
self.features.indices[
self.features.indptr[i] : self.features.indptr[i + 1]
],
dtype=torch.long,
)
+ 1
)
self.X_dict["feature_weight"].append(
torch.tensor(
self.features.data[
self.features.indptr[i] : self.features.indptr[i + 1]
],
dtype=torch.float,
)
)
def _map_labels(self) -> None:
if isinstance(self.labels, int):
self.Y_dict.update(
{
"labels": torch.from_numpy(
np.random.randint(self.labels, size=len(self.candidates))
)
}
)
else:
self.Y_dict.update({"labels": torch.tensor(np.array(self.labels))})
| fonduer-master | src/fonduer/learning/dataset.py |
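A sketch of wrapping candidates and their feature matrix with the FonduerDataset above. The test_cands, F_test, and word2id objects are assumed to come from candidate extraction, featurization, and a word counter built on the training split.

from fonduer.learning.dataset import FonduerDataset

# Passing an int for `labels` creates placeholder labels, the inference-time
# usage described in the class docstring above.
test_dataset = FonduerDataset(
    name="part_temp",
    candidates=test_cands,
    features=F_test,
    word2id=word2id,
    labels=2,
)
x_dict, y_dict = test_dataset[0]  # Emmental-style feature and label dictionaries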
"""Fonduer learning utils."""
import logging
from collections import Counter
from typing import Dict, List, Set, Tuple, Union
import numpy as np
from sqlalchemy.orm import Session
from fonduer.candidates.models import Candidate, Mention
from fonduer.learning.models.marginal import Marginal
logger = logging.getLogger(__name__)
def save_marginals(
    session: Session, X: List[Candidate], marginals: np.ndarray, training: bool = True
) -> None:
"""Save marginal probabilities for a set of Candidates to db.
:param X: A list of arbitrary objects with candidate ids accessible via a
.id attrib
:param marginals: A dense M x K matrix of marginal probabilities, where
        K is the cardinality of the candidates, OR an M-dim list/array if K=2.
:param training: If True, these are training marginals / labels; else they
are saved as end model predictions.
Note: The marginals for k=0 are not stored, only for k = 1,...,K
"""
logger = logging.getLogger(__name__)
# Make sure that we are working with a numpy array
try:
shape = marginals.shape
except Exception:
marginals = np.array(marginals)
shape = marginals.shape
    # Handle binary input given as an M-dim array; assume elements represent
    # positive (k=1) class values
if len(shape) == 1:
marginals = np.vstack([1 - marginals, marginals]).T
# Only add values for classes k=1,...,K
marginal_tuples = []
for i in range(shape[0]):
for k in range(1, shape[1] if len(shape) > 1 else 2):
if marginals[i, k] > 0:
marginal_tuples.append((i, k, marginals[i, k]))
# NOTE: This will delete all existing marginals of type `training`
session.query(Marginal).filter(Marginal.training == training).delete(
synchronize_session="fetch"
)
# Prepare bulk INSERT query
q = Marginal.__table__.insert()
# Prepare values
insert_vals = []
for i, k, p in marginal_tuples:
cid = X[i].id
insert_vals.append(
{
"candidate_id": cid,
"training": training,
"value": k,
# We cast p in case its a numpy type, which psycopg2 does not handle
"probability": float(p),
}
)
# Execute update
session.execute(q, insert_vals)
session.commit()
logger.info(f"Saved {len(marginals)} marginals")
def confusion_matrix(pred: Set, gold: Set) -> Tuple[Set, Set, Set]:
"""Return a confusion matrix.
    This can be used for both entity-level and mention-level evaluation.
:param pred: a set of predicted entities/candidates
:param gold: a set of golden entities/candidates
:return: a tuple of TP, FP, and FN
"""
if not isinstance(pred, set):
pred = set(pred)
if not isinstance(gold, set):
gold = set(gold)
TP = pred.intersection(gold)
FP = pred.difference(gold)
FN = gold.difference(pred)
return (TP, FP, FN)
def mention_to_tokens(
mention: Mention, token_type: str = "words", lowercase: bool = False
) -> List[str]:
"""Extract tokens from the mention.
:param mention: mention object.
    :param token_type: the token type to extract (e.g. words, lemmas, pos_tags).
    :param lowercase: whether to lowercase the tokens.
:return: The token list.
"""
tokens = getattr(mention.context.sentence, token_type)
return [w.lower() if lowercase else w for w in tokens]
def mark(l: int, h: int, idx: int) -> List[Tuple[int, str]]:
"""Produce markers based on argument positions.
:param l: sentence position of first word in argument.
:param h: sentence position of last word in argument.
:param idx: argument index (1 or 2).
:return: markers.
"""
return [(l, f"~~[[{idx}"), (h + 1, f"{idx}]]~~")]
def mark_sentence(s: List[str], args: List[Tuple[int, int, int]]) -> List[str]:
"""Insert markers around relation arguments in word sequence.
:param s: list of tokens in sentence.
:param args: list of triples (l, h, idx) as per @_mark(...) corresponding
to relation arguments
:return: The marked sentence.
Example:
Then Barack married Michelle.
-> Then ~~[[1 Barack 1]]~~ married ~~[[2 Michelle 2]]~~.
"""
marks = sorted([y for m in args for y in mark(*m)], reverse=True)
x = list(s)
for k, v in marks:
x.insert(k, v)
return x
def collect_word_counter(
candidates: Union[List[Candidate], List[List[Candidate]]]
) -> Dict[str, int]:
"""Collect word counter from candidates.
:param candidates: The candidates used to collect word counter.
:return: The word counter.
"""
word_counter: Counter = Counter()
if isinstance(candidates[0], list):
candidates = [cand for candidate in candidates for cand in candidate]
for candidate in candidates:
for mention in candidate:
word_counter.update(mention_to_tokens(mention))
return word_counter
| fonduer-master | src/fonduer/learning/utils.py |
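A runnable illustration of the helpers above: marking relation arguments in a token sequence and turning entity-level sets into precision and recall via the confusion matrix.

from fonduer.learning.utils import confusion_matrix, mark_sentence

tokens = ["Then", "Barack", "married", "Michelle", "."]
# One (first_word_idx, last_word_idx, argument_idx) triple per relation argument.
marked = mark_sentence(tokens, [(1, 1, 1), (3, 3, 2)])
# ['Then', '~~[[1', 'Barack', '1]]~~', 'married', '~~[[2', 'Michelle', '2]]~~', '.']

TP, FP, FN = confusion_matrix(pred={("a", 1), ("b", 2)}, gold={("b", 2), ("c", 3)})
precision = len(TP) / (len(TP) + len(FP))  # 0.5
recall = len(TP) / (len(TP) + len(FN))     # 0.5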
"""Fonduer's learning model module."""
from fonduer.learning.models.marginal import Marginal
from fonduer.learning.models.prediction import Prediction, PredictionKey
__all__ = ["Marginal", "Prediction", "PredictionKey"]
| fonduer-master | src/fonduer/learning/models/__init__.py |
"""Fonduer learning prediction model."""
from sqlalchemy import Column, Float
from fonduer.meta import Meta
from fonduer.utils.models.annotation import AnnotationKeyMixin, AnnotationMixin
class PredictionKey(AnnotationKeyMixin, Meta.Base):
"""A Prediction's annotation key."""
pass
class Prediction(AnnotationMixin, Meta.Base):
"""A Prediction table.
A probability associated with a Candidate, indicating the degree of belief
that the Candidate is true.
A Prediction's annotation key indicates which process or method produced
the Prediction, e.g., which model with which ParameterSet.
"""
value = Column(Float, nullable=False)
| fonduer-master | src/fonduer/learning/models/prediction.py |
"""Fonduer learning marginal model."""
from sqlalchemy import Boolean, Column, Float, ForeignKey, Integer, UniqueConstraint
from fonduer.meta import Meta
class Marginal(Meta.Base):
"""
A marginal probability corresponding to a (Candidate, value) pair.
Represents:
P(candidate = value) = probability
    @training: If True, this is a training marginal; otherwise it is an end-model prediction
"""
__tablename__ = "marginal"
id = Column(Integer, primary_key=True)
candidate_id = Column(Integer, ForeignKey("candidate.id", ondelete="CASCADE"))
training = Column(Boolean, default=True)
value = Column(Integer, nullable=False, default=1)
probability = Column(Float, nullable=False, default=0.0)
__table_args__ = (UniqueConstraint(candidate_id, training, value),)
def __repr__(self) -> str:
"""Represent the marginal as a string."""
label = "Training" if self.training else "Predicted"
return (
f"<"
f"{label} "
f"Marginal: P({self.candidate_id} == {self.value}) = {self.probability}"
f">"
)
| fonduer-master | src/fonduer/learning/models/marginal.py |
"""Fonduer's learning modules."""
| fonduer-master | src/fonduer/learning/modules/__init__.py |
"""Soft cross entropy loss."""
from typing import List
import torch
from torch import Tensor, nn as nn
from torch.nn import functional as F
class SoftCrossEntropyLoss(nn.Module):
"""Calculate the CrossEntropyLoss with soft targets.
:param weight: Weight to assign to each of the classes. Default: None
:param reduction: The way to reduce the losses: 'none' | 'mean' | 'sum'.
'none': no reduction,
'mean': the mean of the losses,
'sum': the sum of the losses.
"""
def __init__(self, weight: List[float] = None, reduction: str = "mean"):
"""Initialize SoftCrossEntropyLoss."""
super().__init__()
if weight is None:
self.weight = None
else:
self.register_buffer("weight", torch.tensor(weight))
self.reduction = reduction
def forward(self, input: Tensor, target: Tensor) -> Tensor:
"""Calculate the loss.
:param input: prediction logits
:param target: target probabilities
:return: loss
"""
n, k = input.shape
losses = input.new_zeros(n)
for i in range(k):
cls_idx = input.new_full((n,), i, dtype=torch.long)
loss = F.cross_entropy(input, cls_idx, reduction="none")
if self.weight is not None:
loss = loss * self.weight[i]
losses += target[:, i].float() * loss
if self.reduction == "mean":
losses = losses.mean()
elif self.reduction == "sum":
losses = losses.sum()
elif self.reduction != "none":
raise ValueError(f"Unrecognized reduction: {self.reduction}")
return losses
| fonduer-master | src/fonduer/learning/modules/soft_cross_entropy_loss.py |
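A runnable illustration of SoftCrossEntropyLoss with probabilistic (soft) targets, such as marginals produced by a label model.

import torch

from fonduer.learning.modules.soft_cross_entropy_loss import SoftCrossEntropyLoss

criterion = SoftCrossEntropyLoss(reduction="mean")
logits = torch.tensor([[2.0, 0.5], [0.1, 1.5]], requires_grad=True)
# Each target row is a probability distribution over the two classes.
targets = torch.tensor([[0.9, 0.1], [0.3, 0.7]])
loss = criterion(logits, targets)
loss.backward()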
"""Concat linear."""
from typing import Any, Dict, List
import torch
from torch import Tensor, nn as nn
class ConcatLinear(nn.Module):
"""Concat different outputs and feed into a linear layer.
:param concat_output_keys: The keys of features to concat.
    :param input_dim: The sum of the dimensions of the concatenated outputs.
    :param outpt_dim: The output dimension.
"""
def __init__(
self, concat_output_keys: List[str], input_dim: int, outpt_dim: int
) -> None:
"""Initialize ConcatLinear."""
super().__init__()
self.concat_output_keys = concat_output_keys
self.linear = nn.Linear(input_dim, outpt_dim)
def forward(self, intermediate_output_dict: Dict[str, Any]) -> Tensor:
"""Forward function."""
input_feature = torch.cat(
[intermediate_output_dict[key][0] for key in self.concat_output_keys], dim=1
)
return self.linear(input_feature)
| fonduer-master | src/fonduer/learning/modules/concat_linear.py |
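A runnable illustration of ConcatLinear. The intermediate output dictionary mimics Emmental's convention of mapping a module name to a list whose first element is that module's output tensor.

import torch

from fonduer.learning.modules.concat_linear import ConcatLinear

# Concatenate a 4-dim and a 2-dim output (6 in total) and project to 2 classes.
head = ConcatLinear(["lstm_out", "feature_out"], 6, 2)
intermediate_output_dict = {
    "lstm_out": [torch.randn(8, 4)],
    "feature_out": [torch.randn(8, 2)],
}
logits = head(intermediate_output_dict)  # shape: (8, 2)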
"""Fonduer featurizer."""
import itertools
import logging
from collections import defaultdict
from typing import (
Any,
Collection,
DefaultDict,
Dict,
Iterable,
List,
Optional,
Type,
Union,
)
from scipy.sparse import csr_matrix
from sqlalchemy.orm import Session
from fonduer.candidates.models import Candidate
from fonduer.features.feature_extractors import FeatureExtractor
from fonduer.features.models import Feature, FeatureKey
from fonduer.parser.models.document import Document
from fonduer.utils.udf import UDF, UDFRunner
from fonduer.utils.utils_udf import (
ALL_SPLITS,
batch_upsert_records,
drop_all_keys,
drop_keys,
get_docs_from_split,
get_mapping,
get_sparse_matrix,
get_sparse_matrix_keys,
upsert_keys,
)
logger = logging.getLogger(__name__)
class Featurizer(UDFRunner):
"""An operator to add Feature Annotations to Candidates.
:param session: The database session to use.
:param candidate_classes: A list of candidate_subclasses to featurize.
:param parallelism: The number of processes to use in parallel. Default 1.
"""
def __init__(
self,
session: Session,
candidate_classes: List[Candidate],
feature_extractors: FeatureExtractor = FeatureExtractor(),
parallelism: int = 1,
) -> None:
"""Initialize the Featurizer."""
super().__init__(
session,
FeaturizerUDF,
parallelism=parallelism,
candidate_classes=candidate_classes,
feature_extractors=feature_extractors,
)
self.candidate_classes = candidate_classes
def update(
self,
docs: Optional[Collection[Document]] = None,
split: int = 0,
parallelism: Optional[int] = None,
progress_bar: bool = True,
) -> None:
"""Update the features of the specified candidates.
:param docs: If provided, apply features to all the candidates in these
documents.
:param split: If docs is None, apply features to the candidates in this
particular split.
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the Featurizer if
it is provided.
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
"""
self.apply(
docs=docs,
split=split,
train=True,
clear=False,
parallelism=parallelism,
progress_bar=progress_bar,
)
def apply( # type: ignore
self,
docs: Optional[Collection[Document]] = None,
split: int = 0,
train: bool = False,
clear: bool = True,
parallelism: Optional[int] = None,
progress_bar: bool = True,
) -> None:
"""Apply features to the specified candidates.
:param docs: If provided, apply features to all the candidates in these
documents.
:param split: If docs is None, apply features to the candidates in this
particular split.
:param train: Whether or not to update the global key set of features
and the features of candidates.
:param clear: Whether or not to clear the features table before
applying features.
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the Featurizer if
it is provided.
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
"""
if docs:
# Call apply on the specified docs for all splits
# TODO: split is int
split = ALL_SPLITS # type: ignore
super().apply(
docs,
split=split,
train=train,
clear=clear,
parallelism=parallelism,
progress_bar=progress_bar,
)
# Needed to sync the bulk operations
self.session.commit()
else:
# Only grab the docs containing candidates from the given split.
split_docs = get_docs_from_split(
self.session, self.candidate_classes, split
)
super().apply(
split_docs,
split=split,
train=train,
clear=clear,
parallelism=parallelism,
progress_bar=progress_bar,
)
# Needed to sync the bulk operations
self.session.commit()
def upsert_keys(
self,
keys: Iterable[str],
candidate_classes: Union[Candidate, Iterable[Candidate], None] = None,
) -> None:
"""Upsert the specified keys to FeatureKey.
:param keys: A list of FeatureKey names to upsert.
:param candidate_classes: A list of the Candidates to upsert the key for.
If None, upsert the keys for all candidate classes associated with
this Featurizer.
"""
# Make sure keys is iterable
keys = keys if isinstance(keys, (list, tuple)) else [keys]
# Make sure candidate_classes is iterable
if candidate_classes:
candidate_classes = (
candidate_classes
if isinstance(candidate_classes, Iterable)
else [candidate_classes]
)
# Ensure only candidate classes associated with the featurizer
# are used.
candidate_classes = [
_.__tablename__
for _ in candidate_classes
if _ in self.candidate_classes
]
if len(candidate_classes) == 0:
logger.warning(
"You didn't specify valid candidate classes for this featurizer."
)
return
# If unspecified, just use all candidate classes
else:
candidate_classes = [_.__tablename__ for _ in self.candidate_classes]
# build dict for use by utils
key_map = dict()
for key in keys:
key_map[key] = set(candidate_classes)
upsert_keys(self.session, FeatureKey, key_map)
def drop_keys(
self,
keys: Iterable[str],
candidate_classes: Union[Candidate, Iterable[Candidate], None] = None,
) -> None:
"""Drop the specified keys from FeatureKeys.
:param keys: A list of FeatureKey names to delete.
:param candidate_classes: A list of the Candidates to drop the key for.
If None, drops the keys for all candidate classes associated with
this Featurizer.
"""
# Make sure keys is iterable
keys = keys if isinstance(keys, (list, tuple)) else [keys]
# Make sure candidate_classes is iterable
if candidate_classes:
candidate_classes = (
candidate_classes
if isinstance(candidate_classes, Iterable)
else [candidate_classes]
)
# Ensure only candidate classes associated with the featurizer
# are used.
candidate_classes = [
_.__tablename__
for _ in candidate_classes
if _ in self.candidate_classes
]
if len(candidate_classes) == 0:
logger.warning(
"You didn't specify valid candidate classes for this featurizer."
)
return
# If unspecified, just use all candidate classes
else:
candidate_classes = [_.__tablename__ for _ in self.candidate_classes]
# build dict for use by utils
key_map = dict()
for key in keys:
key_map[key] = set(candidate_classes)
drop_keys(self.session, FeatureKey, key_map)
def get_keys(self) -> List[FeatureKey]:
"""Return a list of keys for the Features.
:return: List of FeatureKeys.
"""
return list(get_sparse_matrix_keys(self.session, FeatureKey))
def _add(self, session: Session, records_list: List[List[Dict[str, Any]]]) -> None:
# Make a flat list of all records from the list of list of records.
# This helps reduce the number of queries needed to update.
all_records = list(itertools.chain.from_iterable(records_list))
batch_upsert_records(session, Feature, all_records)
def clear(self, train: bool = False, split: int = 0) -> None: # type: ignore
"""Delete Features of each class from the database.
:param train: Whether or not to clear the FeatureKeys
:param split: Which split of candidates to clear features from.
"""
# Clear Features for the candidates in the split passed in.
logger.info(f"Clearing Features (split {split})")
if split == ALL_SPLITS:
sub_query = self.session.query(Candidate.id).subquery()
else:
sub_query = (
self.session.query(Candidate.id)
.filter(Candidate.split == split)
.subquery()
)
query = self.session.query(Feature).filter(Feature.candidate_id.in_(sub_query))
query.delete(synchronize_session="fetch")
# Delete all old annotation keys
if train:
logger.debug(f"Clearing all FeatureKeys from {self.candidate_classes}...")
drop_all_keys(self.session, FeatureKey, self.candidate_classes)
def clear_all(self) -> None:
"""Delete all Features."""
logger.info("Clearing ALL Features and FeatureKeys.")
self.session.query(Feature).delete(synchronize_session="fetch")
self.session.query(FeatureKey).delete(synchronize_session="fetch")
def _after_apply(self, train: bool = False, **kwargs: Any) -> None:
# Insert all Feature Keys
if train:
key_map: DefaultDict[str, set] = defaultdict(set)
for feature in self.session.query(Feature).all():
cand = feature.candidate
for key in feature.keys:
key_map[key].add(cand.__class__.__tablename__)
self.session.query(FeatureKey).delete(synchronize_session="fetch")
# TODO: upsert is too much. insert is fine as all keys are deleted.
upsert_keys(self.session, FeatureKey, key_map)
def get_feature_matrices(
self, cand_lists: List[List[Candidate]]
) -> List[csr_matrix]:
"""Load sparse matrix of Features for each candidate_class.
:param cand_lists: The candidates to get features for.
        :return: A list of M x N sparse matrices, where M is the number of candidates
            and N is the number of features.
"""
return get_sparse_matrix(self.session, FeatureKey, cand_lists)
class FeaturizerUDF(UDF):
"""UDF for performing candidate extraction."""
def __init__(
self,
candidate_classes: Iterable[Type[Candidate]],
feature_extractors: FeatureExtractor,
**kwargs: Any,
) -> None:
"""Initialize the FeaturizerUDF."""
self.candidate_classes = (
candidate_classes
if isinstance(candidate_classes, (list, tuple))
else [candidate_classes]
)
self.feature_extractors = feature_extractors
super().__init__(**kwargs)
def apply(self, doc: Document, **kwargs: Any) -> List[List[Dict[str, Any]]]:
"""Extract candidates from the given Context.
:param doc: A document to process.
"""
logger.debug(f"Document: {doc}")
# Get all the candidates in this doc that will be featurized
cands_list = [
getattr(doc, candidate_class.__tablename__ + "s")
for candidate_class in self.candidate_classes
]
records_list = [
list(get_mapping(Feature, cands, self.feature_extractors.extract))
for cands in cands_list
]
return records_list
| fonduer-master | src/fonduer/features/featurizer.py |
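A sketch of the typical Featurizer workflow. The session, the PartTemp candidate class, and the candidate lists are assumptions standing in for objects created earlier in a pipeline.

from fonduer.features import Featurizer

featurizer = Featurizer(session, [PartTemp], parallelism=4)

# Build the FeatureKey set from the training split, then featurize the other
# splits against those same keys.
featurizer.apply(split=0, train=True)
featurizer.apply(split=1)

# The inputs are lists of candidate lists, one list per candidate class.
F_train = featurizer.get_feature_matrices(train_cands)
F_dev = featurizer.get_feature_matrices(dev_cands)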
"""Fonduer's features module."""
from fonduer.features.feature_extractors import FeatureExtractor
from fonduer.features.featurizer import Featurizer
__all__ = ["Featurizer", "FeatureExtractor"]
| fonduer-master | src/fonduer/features/__init__.py |
"""Fonduer feature extractor."""
from typing import Callable, Dict, Iterator, List, Tuple, Union
from fonduer.candidates.models import Candidate
from fonduer.features.feature_libs.structural_features import (
extract_structural_features,
)
from fonduer.features.feature_libs.tabular_features import extract_tabular_features
from fonduer.features.feature_libs.textual_features import extract_textual_features
from fonduer.features.feature_libs.visual_features import extract_visual_features
FEATURES: Dict[str, Callable[[List[Candidate]], Iterator[Tuple[int, str, int]]]] = {
"textual": extract_textual_features,
"structural": extract_structural_features,
"tabular": extract_tabular_features,
"visual": extract_visual_features,
}
# Type alias for feature_func
Feature_func = Callable[[List[Candidate]], Iterator[Tuple[int, str, int]]]
class FeatureExtractor(object):
"""A class to extract features from candidates.
:param features: a list of which Fonduer feature types to extract, defaults
to ["textual", "structural", "tabular", "visual"]
    :param customize_feature_funcs: a list of customized feature extractors, where
        each extractor takes a list of candidates as input and yields tuples
        of (candidate_id, feature, value), defaults to []
"""
def __init__(
self,
features: List[str] = ["textual", "structural", "tabular", "visual"],
customize_feature_funcs: Union[Feature_func, List[Feature_func]] = [],
) -> None:
"""Initialize FeatureExtractor."""
if not isinstance(customize_feature_funcs, list):
customize_feature_funcs = [customize_feature_funcs]
self.feature_extractors: List[
Callable[[List[Candidate]], Iterator[Tuple[int, str, int]]]
] = []
for feature in features:
if feature not in FEATURES:
raise ValueError(f"Unrecognized feature type: {feature}")
self.feature_extractors.append(FEATURES[feature])
self.feature_extractors.extend(customize_feature_funcs)
def extract(
self, candidates: Union[List[Candidate], Candidate]
) -> Iterator[Tuple[int, str, int]]:
"""Extract features from candidates.
:param candidates: A list of candidates to extract features from
"""
candidates = candidates if isinstance(candidates, list) else [candidates]
for feature_extractor in self.feature_extractors:
for candidate_id, feature, value in feature_extractor(candidates):
yield candidate_id, feature, value
| fonduer-master | src/fonduer/features/feature_extractors.py |
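A sketch of plugging a custom feature function into FeatureExtractor. The function only has to yield (candidate_id, feature_name, value) triples as described in the docstring above; the html_tag attribute used here and the session/PartTemp objects are assumptions.

from fonduer.features import FeatureExtractor, Featurizer


def html_tag_features(candidates):
    """Yield a feature recording the HTML tag of each mention's sentence."""
    for candidate in candidates:
        for i, mention in enumerate(candidate.get_mentions()):
            tag = mention.context.sentence.html_tag  # assumed Sentence attribute
            yield candidate.id, f"CUSTOM_e{i}_TAG_[{tag}]", 1


feature_extractor = FeatureExtractor(
    features=["textual", "tabular"],
    customize_feature_funcs=[html_tag_features],
)
featurizer = Featurizer(session, [PartTemp], feature_extractors=feature_extractor)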
"""Fonduer visual feature extractor."""
from typing import Dict, Iterator, List, Set, Tuple, Union
from fonduer.candidates.models import Candidate
from fonduer.candidates.models.span_mention import SpanMention, TemporarySpanMention
from fonduer.utils.data_model_utils import (
get_visual_aligned_lemmas,
is_horz_aligned,
is_vert_aligned,
is_vert_aligned_center,
is_vert_aligned_left,
is_vert_aligned_right,
same_page,
)
FEAT_PRE = "VIZ_"
DEF_VALUE = 1
unary_vizlib_feats: Dict[str, Set] = {}
multinary_vizlib_feats: Dict[str, Set] = {}
def extract_visual_features(
candidates: Union[Candidate, List[Candidate]],
) -> Iterator[Tuple[int, str, int]]:
"""Extract visual features.
:param candidates: A list of candidates to extract features from
"""
candidates = candidates if isinstance(candidates, list) else [candidates]
for candidate in candidates:
args = tuple([m.context for m in candidate.get_mentions()])
if any(not (isinstance(arg, TemporarySpanMention)) for arg in args):
raise ValueError(
f"Visual feature only accepts Span-type arguments, "
f"{type(candidate)}-type found."
)
# Unary candidates
if len(args) == 1:
span = args[0]
# Add VisualLib entity features (if applicable)
if span.sentence.is_visual():
if span.stable_id not in unary_vizlib_feats:
unary_vizlib_feats[span.stable_id] = set()
for f, v in _vizlib_unary_features(span):
unary_vizlib_feats[span.stable_id].add((f, v))
for f, v in unary_vizlib_feats[span.stable_id]:
yield candidate.id, FEAT_PRE + f, v
# Multinary candidates
else:
spans = args
# Add VisualLib entity features (if applicable)
if all([span.sentence.is_visual() for span in spans]):
for i, span in enumerate(spans):
prefix = f"e{i}_"
if span.stable_id not in unary_vizlib_feats:
unary_vizlib_feats[span.stable_id] = set()
for f, v in _vizlib_unary_features(span):
unary_vizlib_feats[span.stable_id].add((f, v))
for f, v in unary_vizlib_feats[span.stable_id]:
yield candidate.id, FEAT_PRE + prefix + f, v
if candidate.id not in multinary_vizlib_feats:
multinary_vizlib_feats[candidate.id] = set()
for f, v in _vizlib_multinary_features(spans):
multinary_vizlib_feats[candidate.id].add((f, v))
for f, v in multinary_vizlib_feats[candidate.id]:
yield candidate.id, FEAT_PRE + f, v
def _vizlib_unary_features(span: SpanMention) -> Iterator[Tuple[str, int]]:
"""Visual-related features for a single span."""
if not span.sentence.is_visual():
return
for f in get_visual_aligned_lemmas(span):
yield f"ALIGNED_{f}", DEF_VALUE
for page in set(span.get_attrib_tokens("page")):
yield f"PAGE_[{page}]", DEF_VALUE
def _vizlib_multinary_features(
spans: Tuple[SpanMention, ...]
) -> Iterator[Tuple[str, int]]:
"""Visual-related features for multiple spans."""
if same_page(spans):
yield "SAME_PAGE", DEF_VALUE
if is_horz_aligned(spans):
yield "HORZ_ALIGNED", DEF_VALUE
if is_vert_aligned(spans):
yield "VERT_ALIGNED", DEF_VALUE
if is_vert_aligned_left(spans):
yield "VERT_ALIGNED_LEFT", DEF_VALUE
if is_vert_aligned_right(spans):
yield "VERT_ALIGNED_RIGHT", DEF_VALUE
if is_vert_aligned_center(spans):
yield "VERT_ALIGNED_CENTER", DEF_VALUE
| fonduer-master | src/fonduer/features/feature_libs/visual_features.py |
"""Fonduer tabular feature extractor."""
from typing import Dict, Iterator, List, Set, Tuple, Union
from fonduer.candidates.models import Candidate
from fonduer.candidates.models.span_mention import SpanMention, TemporarySpanMention
from fonduer.utils.config import get_config
from fonduer.utils.data_model_utils import (
get_cell_ngrams,
get_col_ngrams,
get_head_ngrams,
get_row_ngrams,
)
from fonduer.utils.utils_table import min_col_diff, min_row_diff, num_cols, num_rows
FEAT_PRE = "TAB_"
DEF_VALUE = 1
unary_tablelib_feats: Dict[str, Set] = {}
multinary_tablelib_feats: Dict[str, Set] = {}
settings = get_config()
def extract_tabular_features(
candidates: Union[Candidate, List[Candidate]],
) -> Iterator[Tuple[int, str, int]]:
"""Extract tabular features.
:param candidates: A list of candidates to extract features from
"""
candidates = candidates if isinstance(candidates, list) else [candidates]
for candidate in candidates:
args = tuple([m.context for m in candidate.get_mentions()])
if any(not (isinstance(arg, TemporarySpanMention)) for arg in args):
raise ValueError(
f"Table feature only accepts Span-type arguments, "
f"{type(candidate)}-type found."
)
# Unary candidates
if len(args) == 1:
span = args[0]
if span.stable_id not in unary_tablelib_feats:
unary_tablelib_feats[span.stable_id] = set()
for f, v in _tablelib_unary_features(span):
unary_tablelib_feats[span.stable_id].add((f, v))
for f, v in unary_tablelib_feats[span.stable_id]:
yield candidate.id, FEAT_PRE + f, v
# Multinary candidates
else:
spans = args
if any([span.sentence.is_tabular() for span in spans]):
for i, span in enumerate(spans):
prefix = f"e{i}_"
if span.stable_id not in unary_tablelib_feats:
unary_tablelib_feats[span.stable_id] = set()
for f, v in _tablelib_unary_features(span):
unary_tablelib_feats[span.stable_id].add((f, v))
for f, v in unary_tablelib_feats[span.stable_id]:
yield candidate.id, FEAT_PRE + prefix + f, v
if candidate.id not in multinary_tablelib_feats:
multinary_tablelib_feats[candidate.id] = set()
for f, v in _tablelib_multinary_features(spans):
multinary_tablelib_feats[candidate.id].add((f, v))
for f, v in multinary_tablelib_feats[candidate.id]:
yield candidate.id, FEAT_PRE + f, v
def _tablelib_unary_features(span: SpanMention) -> Iterator[Tuple[str, int]]:
"""Table-/structure-related features for a single span."""
if not span.sentence.is_tabular():
return
sentence = span.sentence
for attrib in settings["featurization"]["tabular"]["unary_features"]["attrib"]:
for ngram in get_cell_ngrams(
span,
n_max=settings["featurization"]["tabular"]["unary_features"][
"get_cell_ngrams"
]["max"],
attrib=attrib,
):
yield f"CELL_{attrib.upper()}_[{ngram}]", DEF_VALUE
for row_num in range(sentence.row_start, sentence.row_end + 1):
yield f"ROW_NUM_[{row_num}]", DEF_VALUE
for col_num in range(sentence.col_start, sentence.col_end + 1):
yield f"COL_NUM_[{col_num}]", DEF_VALUE
# NOTE: These two features could be accounted for by HTML_ATTR in
# structural features
yield f"ROW_SPAN_[{num_rows(sentence)}]", DEF_VALUE
yield f"COL_SPAN_[{num_cols(sentence)}]", DEF_VALUE
for axis in ["row", "col"]:
for ngram in get_head_ngrams(
span,
axis,
n_max=settings["featurization"]["tabular"]["unary_features"][
"get_head_ngrams"
]["max"],
attrib=attrib,
):
yield f"{axis.upper()}_HEAD_{attrib.upper()}_[{ngram}]", DEF_VALUE
for ngram in get_row_ngrams(
span,
n_max=settings["featurization"]["tabular"]["unary_features"][
"get_row_ngrams"
]["max"],
attrib=attrib,
):
yield f"ROW_{attrib.upper()}_[{ngram}]", DEF_VALUE
for ngram in get_col_ngrams(
span,
n_max=settings["featurization"]["tabular"]["unary_features"][
"get_col_ngrams"
]["max"],
attrib=attrib,
):
yield f"COL_{attrib.upper()}_[{ngram}]", DEF_VALUE
# TODO:
# for ngram in get_row_ngrams(
# span, n_max=2, attrib=attrib, direct=False, infer=True
# ):
# yield "ROW_INFERRED_%s_[%s]" % (attrib.upper(), ngram), DEF_VALUE
# for ngram in get_col_ngrams(
# span, n_max=2, attrib=attrib, direct=False, infer=True
# ):
# yield "COL_INFERRED_%s_[%s]" % (attrib.upper(), ngram), DEF_VALUE
def _tablelib_multinary_features(
spans: Tuple[SpanMention, ...]
) -> Iterator[Tuple[str, int]]:
"""Table-/structure-related features for multiple spans."""
multinary_features = settings["featurization"]["tabular"]["multinary_features"]
span_sentences = [span.sentence for span in spans]
if all([sentence.is_tabular() for sentence in span_sentences]):
span_tables = [sentence.table for sentence in span_sentences]
if span_tables[1:] == span_tables[:-1]:
yield "SAME_TABLE", DEF_VALUE
if all([span.sentence.cell is not None for span in spans]):
row_diff = min_row_diff(
span_sentences,
absolute=multinary_features["min_row_diff"]["absolute"],
)
col_diff = min_col_diff(
span_sentences,
absolute=multinary_features["min_col_diff"]["absolute"],
)
yield f"SAME_TABLE_ROW_DIFF_[{row_diff}]", DEF_VALUE
yield f"SAME_TABLE_COL_DIFF_[{col_diff}]", DEF_VALUE
yield (
f"SAME_TABLE_MANHATTAN_DIST_[{abs(row_diff) + abs(col_diff)}]"
), DEF_VALUE
span_cells = [sentence.cell for sentence in span_sentences]
if span_cells[1:] == span_cells[:-1]:
yield "SAME_CELL", DEF_VALUE
word_diff = sum(
[
s1.get_word_start_index() - s2.get_word_start_index()
for s1, s2 in zip(spans[:-1], spans[1:])
]
)
yield (f"WORD_DIFF_[{word_diff}]"), DEF_VALUE
char_diff = sum(
[
s1.char_start - s2.char_start
for s1, s2 in zip(spans[:-1], spans[1:])
]
)
yield (f"CHAR_DIFF_[{char_diff}]"), DEF_VALUE
                    if span_sentences[1:] == span_sentences[:-1]:
yield "SAME_SENTENCE", DEF_VALUE
else:
if all([sentence.cell is not None for sentence in span_sentences]):
yield "DIFF_TABLE", DEF_VALUE
row_diff = min_row_diff(
span_sentences,
absolute=multinary_features["min_row_diff"]["absolute"],
)
col_diff = min_col_diff(
span_sentences,
absolute=multinary_features["min_col_diff"]["absolute"],
)
yield f"DIFF_TABLE_ROW_DIFF_[{row_diff}]", DEF_VALUE
yield f"DIFF_TABLE_COL_DIFF_[{col_diff}]", DEF_VALUE
yield (
f"DIFF_TABLE_MANHATTAN_DIST_[{abs(row_diff) + abs(col_diff)}]"
), DEF_VALUE
| fonduer-master | src/fonduer/features/feature_libs/tabular_features.py |
"""Fonduer textual feature extractor."""
from builtins import range
from typing import Any, Callable, Dict, Iterator, List, Set, Tuple, Union
from treedlib import (
Children,
Compile,
Indicator,
LeftNgrams,
LeftSiblings,
Mention,
Ngrams,
Parents,
RightNgrams,
RightSiblings,
compile_relation_feature_generator,
)
from fonduer.candidates.models import Candidate, ImplicitSpanMention, SpanMention
from fonduer.features.feature_libs.tree_structs import corenlp_to_xmltree
from fonduer.utils.config import get_config
from fonduer.utils.data_model_utils import get_left_ngrams, get_right_ngrams
from fonduer.utils.utils import get_as_dict, tokens_to_ngrams
DEF_VALUE = 1
unary_ddlib_feats: Dict[str, Set] = {}
unary_word_feats: Dict[str, Set] = {}
unary_tdl_feats: Dict[str, Set] = {}
multinary_tdl_feats: Dict[str, Set] = {}
settings = get_config()
def extract_textual_features(
candidates: Union[Candidate, List[Candidate]],
) -> Iterator[Tuple[int, str, int]]:
"""Extract textual features.
:param candidates: A list of candidates to extract features from
"""
candidates = candidates if isinstance(candidates, list) else [candidates]
for candidate in candidates:
args = tuple([m.context for m in candidate.get_mentions()])
if not (isinstance(args[0], (SpanMention, ImplicitSpanMention))):
raise ValueError(
f"Accepts Span/ImplicitSpan-type mentions, {type(args[0])}-type found."
)
# Unary candidates
if len(args) == 1:
span: Union[SpanMention, ImplicitSpanMention] = args[0]
if span.sentence.is_lingual():
get_tdl_feats = _compile_entity_feature_generator()
xmltree = corenlp_to_xmltree(span.sentence)
sidxs = list(
range(span.get_word_start_index(), span.get_word_end_index() + 1)
)
if len(sidxs) > 0:
# Add DDLIB entity features
for f in _get_ddlib_feats(span, get_as_dict(span.sentence), sidxs):
yield candidate.id, f"DDL_{f}", DEF_VALUE
# Add TreeDLib entity features
if span.stable_id not in unary_tdl_feats:
unary_tdl_feats[span.stable_id] = set()
for f in get_tdl_feats(xmltree.root, sidxs):
unary_tdl_feats[span.stable_id].add(f)
for f in unary_tdl_feats[span.stable_id]:
yield candidate.id, f"TDL_{f}", DEF_VALUE
for f in _get_word_feats(span):
yield candidate.id, f"BASIC_{f}", DEF_VALUE
# Multinary candidates
else:
spans = args
if all([span.sentence.is_lingual() for span in spans]):
get_tdl_feats = compile_relation_feature_generator(is_multary=True)
sents = [get_as_dict(span.sentence) for span in spans]
xmltree = corenlp_to_xmltree(spans[0].sentence)
s_idxs = [
list(
range(
span.get_word_start_index(), span.get_word_end_index() + 1
)
)
for span in spans
]
if all([len(s_idx) > 0 for s_idx in s_idxs]):
# Add DDLIB entity features for relation
for span, sent, s_idx, i in zip(
spans, sents, s_idxs, range(len(spans))
):
for f in _get_ddlib_feats(span, sent, s_idx):
yield candidate.id, f"DDL_e{i}_{f}", DEF_VALUE
# Add TreeDLib relation features
if candidate.id not in multinary_tdl_feats:
multinary_tdl_feats[candidate.id] = set()
for f in get_tdl_feats(xmltree.root, s_idxs):
multinary_tdl_feats[candidate.id].add(f)
for f in multinary_tdl_feats[candidate.id]:
yield candidate.id, f"TDL_{f}", DEF_VALUE
for i, span in enumerate(spans):
for f in _get_word_feats(span):
yield candidate.id, f"BASIC_e{i}_{f}", DEF_VALUE
def _compile_entity_feature_generator() -> Callable:
"""Compile entity feature generator.
    Returns a generator function which accepts an xml root and a list of indexes
    for a mention, and generates features for this entity.
"""
BASIC_ATTRIBS_REL = ["lemma", "dep_label"]
m = Mention(0)
# Basic relation feature templates
temps = [
[Indicator(m, a) for a in BASIC_ATTRIBS_REL],
Indicator(m, "dep_label,lemma"),
        # The path to the root: ngram lemmas along it
Ngrams(Parents(m, 3), "lemma", (1, 3)),
Ngrams(Children(m), "lemma", (1, 3)),
# The siblings of the mention
[LeftNgrams(LeftSiblings(m), a) for a in BASIC_ATTRIBS_REL],
[RightNgrams(RightSiblings(m), a) for a in BASIC_ATTRIBS_REL],
]
# return generator function
return Compile(temps).apply_mention
def _get_ddlib_feats(
span: SpanMention, context: Dict[str, Any], idxs: List[int]
) -> Iterator[str]:
"""Minimalist port of generic mention features from ddlib."""
if span.stable_id not in unary_ddlib_feats:
unary_ddlib_feats[span.stable_id] = set()
for seq_feat in _get_seq_features(context, idxs):
unary_ddlib_feats[span.stable_id].add(seq_feat)
for window_feat in _get_window_features(context, idxs):
unary_ddlib_feats[span.stable_id].add(window_feat)
for f in unary_ddlib_feats[span.stable_id]:
yield f
def _get_seq_features(context: Dict[str, Any], idxs: List[int]) -> Iterator[str]:
yield f"WORD_SEQ_[{' '.join(context['words'][i] for i in idxs)}]"
yield f"LEMMA_SEQ_[{' '.join(context['lemmas'][i] for i in idxs)}]"
yield f"POS_SEQ_[{' '.join(context['pos_tags'][i] for i in idxs)}]"
yield f"DEP_SEQ_[{' '.join(context['dep_labels'][i] for i in idxs)}]"
def _get_window_features(
context: Dict[str, Any],
idxs: List[int],
window: int = settings["featurization"]["textual"]["window_feature"]["size"],
combinations: bool = settings["featurization"]["textual"]["window_feature"][
"combinations"
],
isolated: bool = settings["featurization"]["textual"]["window_feature"]["isolated"],
) -> Iterator[str]:
left_lemmas = []
left_pos_tags = []
right_lemmas = []
right_pos_tags = []
try:
for i in range(1, window + 1):
lemma = context["lemmas"][idxs[0] - i]
try:
float(lemma)
lemma = "_NUMBER"
except ValueError:
pass
left_lemmas.append(lemma)
left_pos_tags.append(context["pos_tags"][idxs[0] - i])
except IndexError:
pass
left_lemmas.reverse()
left_pos_tags.reverse()
try:
for i in range(1, window + 1):
lemma = context["lemmas"][idxs[-1] + i]
try:
float(lemma)
lemma = "_NUMBER"
except ValueError:
pass
right_lemmas.append(lemma)
right_pos_tags.append(context["pos_tags"][idxs[-1] + i])
except IndexError:
pass
if isolated:
for i in range(len(left_lemmas)):
yield f"W_LEFT_{i + 1}_[{' '.join(left_lemmas[-i - 1 :])}]"
yield f"W_LEFT_POS_{i + 1}_[{' '.join(left_pos_tags[-i - 1 :])}]"
for i in range(len(right_lemmas)):
yield f"W_RIGHT_{i + 1}_[{' '.join(right_lemmas[: i + 1])}]"
yield f"W_RIGHT_POS_{i + 1}_[{' '.join(right_pos_tags[: i + 1])}]"
if combinations:
for i in range(len(left_lemmas)):
curr_left_lemmas = " ".join(left_lemmas[-i - 1 :])
try:
curr_left_pos_tags = " ".join(left_pos_tags[-i - 1 :])
except TypeError:
new_pos_tags = []
for pos in left_pos_tags[-i - 1 :]:
to_add = pos
if not to_add:
to_add = "None"
new_pos_tags.append(to_add)
curr_left_pos_tags = " ".join(new_pos_tags)
for j in range(len(right_lemmas)):
curr_right_lemmas = " ".join(right_lemmas[: j + 1])
try:
curr_right_pos_tags = " ".join(right_pos_tags[: j + 1])
except TypeError:
new_pos_tags = []
for pos in right_pos_tags[: j + 1]:
to_add = pos
if not to_add:
to_add = "None"
new_pos_tags.append(to_add)
curr_right_pos_tags = " ".join(new_pos_tags)
yield (
f"W_LEMMA_L_{i + 1}_R_{j + 1}_"
f"[{curr_left_lemmas}]_[{curr_right_lemmas}]"
)
yield (
f"W_POS_L_{i + 1}_R_{j + 1}_"
f"[{curr_left_pos_tags}]_[{curr_right_pos_tags}]"
)
def _get_word_feats(span: SpanMention) -> Iterator[str]:
attrib = "words"
if span.stable_id not in unary_word_feats:
unary_word_feats[span.stable_id] = set()
for ngram in tokens_to_ngrams(span.get_attrib_tokens(attrib), n_min=1, n_max=2):
feature = f"CONTAINS_{attrib.upper()}_[{ngram}]"
unary_word_feats[span.stable_id].add(feature)
for ngram in get_left_ngrams(
span,
window=settings["featurization"]["textual"]["word_feature"]["window"],
n_max=2,
attrib=attrib,
):
feature = f"LEFT_{attrib.upper()}_[{ngram}]"
unary_word_feats[span.stable_id].add(feature)
for ngram in get_right_ngrams(
span,
window=settings["featurization"]["textual"]["word_feature"]["window"],
n_max=2,
attrib=attrib,
):
feature = f"RIGHT_{attrib.upper()}_[{ngram}]"
unary_word_feats[span.stable_id].add(feature)
unary_word_feats[span.stable_id].add(
(
f"SPAN_TYPE_["
f"{'IMPLICIT' if isinstance(span, ImplicitSpanMention) else 'EXPLICIT'}"
f"]"
)
)
if span.get_span()[0].isupper():
unary_word_feats[span.stable_id].add("STARTS_WITH_CAPITAL")
unary_word_feats[span.stable_id].add(f"LENGTH_{span.get_num_words()}")
for f in unary_word_feats[span.stable_id]:
yield f
| fonduer-master | src/fonduer/features/feature_libs/textual_features.py |
"""Fonduer's feature library module."""
from fonduer.features.feature_libs.structural_features import (
extract_structural_features,
)
from fonduer.features.feature_libs.tabular_features import extract_tabular_features
from fonduer.features.feature_libs.textual_features import extract_textual_features
from fonduer.features.feature_libs.visual_features import extract_visual_features
__all__ = [
"extract_textual_features",
"extract_structural_features",
"extract_tabular_features",
"extract_visual_features",
]
| fonduer-master | src/fonduer/features/feature_libs/__init__.py |
"""Fonduer tree structs."""
import re
from functools import lru_cache
from typing import Any, Dict, List, Optional, Union
from lxml import etree as et
from lxml.etree import _Element
from fonduer.parser.models import Sentence
from fonduer.utils.utils import get_as_dict
class XMLTree:
"""A generic tree representation which takes XML as input.
Includes subroutines for conversion to JSON & for visualization based on js
form
"""
def __init__(self, xml_root: _Element, words: Optional[List[str]] = None) -> None:
"""Call subroutines to generate JSON form of XML input."""
self.root = xml_root
self.words = words
# create a unique id for e.g. canvas id in notebook
self.id = str(abs(hash(self.to_str())))
def _to_json(self, root: _Element) -> Dict:
children: List[Dict] = []
for c in root:
children.append(self._to_json(c))
js = {"attrib": dict(root.attrib), "children": children}
return js
def to_json(self) -> Dict:
"""Convert to json."""
return self._to_json(self.root)
def to_str(self) -> bytes:
"""Convert to string."""
return et.tostring(self.root)
@lru_cache(maxsize=1024)
def corenlp_to_xmltree(obj: Union[Dict, Sentence], prune_root: bool = True) -> XMLTree:
"""Convert CoreNLP attributes into an XMLTree.
Transform an object with CoreNLP dep_path and dep_parent attributes into
an XMLTree. Will include elements of any array having the same dimension
as dep_* as node attributes. Also adds special word_idx attribute
corresponding to original sequence order in sentence.
"""
# Convert input object to dictionary
s: Dict = get_as_dict(obj)
# Use the dep_parents array as a guide: ensure it is present and a list of ints
if not ("dep_parents" in s and isinstance(s["dep_parents"], list)):
raise ValueError(
"Input CoreNLP object must have a 'dep_parents' attribute which is a list"
)
try:
dep_parents = list(map(int, s["dep_parents"]))
except Exception:
raise ValueError("'dep_parents' attribute must be a list of ints")
# Also ensure that we are using CoreNLP-native indexing
    # (root=0, 1-based word indexes)!
b = min(dep_parents)
if b != 0:
dep_parents = list(map(lambda j: j - b, dep_parents))
# Parse recursively
root = corenlp_to_xmltree_sub(s, dep_parents, 0)
# Often the return tree will have several roots, where one is the actual
# root and the rest are just singletons not included in the dep tree
# parse...
# We optionally remove these singletons and then collapse the root if only
# one child left.
if prune_root:
for c in root:
if len(c) == 0:
root.remove(c)
if len(root) == 1:
root = root.findall("./*")[0]
return XMLTree(root, words=s["words"])
def scrub(s: str) -> str:
"""Scrub the string.
:param s: The input string.
:return: The scrubbed string.
"""
return "".join(c for c in s if ord(c) < 128)
def corenlp_to_xmltree_sub(
s: Dict[str, Any], dep_parents: List[int], rid: int = 0
) -> _Element:
"""Construct XMLTree with CoreNLP information.
:param s: Input object.
:param dep_parents: Dependency parents.
:param rid: Root id, defaults to 0
:return: The constructed XMLTree.
"""
i = rid - 1
attrib = {}
N = len(list(dep_parents))
# Add all attributes that have the same shape as dep_parents
if i >= 0:
for k, v in list(
filter(lambda t: isinstance(t[1], list) and len(t[1]) == N, s.items())
):
if v[i] is not None:
attrib[singular(k)] = (
scrub(v[i]).encode("ascii", "ignore")
if hasattr(v[i], "encode")
else str(v[i])
)
# Add word_idx if not present
if "word_idx" not in attrib:
attrib["word_idx"] = str(i)
# Build tree recursively
root = et.Element("node", attrib=attrib)
for i, d in enumerate(dep_parents):
if d == rid:
root.append(corenlp_to_xmltree_sub(s, dep_parents, i + 1))
return root
def singular(s: str) -> str:
"""Get singular form of word s (crudely).
:param s: The input string.
:return: The singular form of the string.
"""
return re.sub(r"e?s$", "", s, flags=re.I)
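# Editorial sketch (not part of the original module): ``corenlp_to_xmltree`` is
# lru_cached and normally receives a ``Sentence``, so this example drives the
# recursive helper directly with a small, made-up dependency parse to show the
# shape of the resulting tree. All words and arcs below are illustrative only.
if __name__ == "__main__":
    example = {
        "words": ["Fonduer", "parses", "documents"],
        "dep_parents": [2, 0, 2],  # 1-based head indexes; 0 marks the root word
        "dep_labels": ["nsubj", "ROOT", "dobj"],
    }
    tree = XMLTree(
        corenlp_to_xmltree_sub(example, example["dep_parents"], 0),
        words=example["words"],
    )
    print(tree.to_str())  # nested <node word=... dep_label=... word_idx=...> elements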
| fonduer-master | src/fonduer/features/feature_libs/tree_structs.py |
"""Fonduer structural feature extractor."""
from typing import Dict, Iterator, List, Set, Tuple, Union
from fonduer.candidates.models import Candidate
from fonduer.candidates.models.span_mention import SpanMention, TemporarySpanMention
from fonduer.utils.data_model_utils import (
common_ancestor,
get_ancestor_class_names,
get_ancestor_id_names,
get_ancestor_tag_names,
get_attributes,
get_next_sibling_tags,
get_parent_tag,
get_prev_sibling_tags,
get_tag,
lowest_common_ancestor_depth,
)
FEATURE_PREFIX = "STR_"
DEF_VALUE = 1
unary_strlib_feats: Dict[str, Set[Tuple[str, int]]] = {}
multinary_strlib_feats: Dict[str, Set[Tuple[str, int]]] = {}
def extract_structural_features(
candidates: Union[Candidate, List[Candidate]],
) -> Iterator[Tuple[int, str, int]]:
"""Extract structural features.
:param candidates: A list of candidates to extract features from
"""
candidates = candidates if isinstance(candidates, list) else [candidates]
for candidate in candidates:
args = tuple([m.context for m in candidate.get_mentions()])
if any(not (isinstance(arg, TemporarySpanMention)) for arg in args):
raise ValueError(
f"Structural feature only accepts Span-type arguments, "
f"{type(candidate)}-type found."
)
# Unary candidates
if len(args) == 1:
span = args[0]
if span.sentence.is_structural():
if span.stable_id not in unary_strlib_feats:
unary_strlib_feats[span.stable_id] = set()
for feature, value in _strlib_unary_features(span):
unary_strlib_feats[span.stable_id].add((feature, value))
for feature, value in unary_strlib_feats[span.stable_id]:
yield candidate.id, FEATURE_PREFIX + feature, value
# Multinary candidates
else:
spans = args
if all([span.sentence.is_structural() for span in spans]):
for i, span in enumerate(spans):
prefix = f"e{i}_"
if span.stable_id not in unary_strlib_feats:
unary_strlib_feats[span.stable_id] = set()
for feature, value in _strlib_unary_features(span):
unary_strlib_feats[span.stable_id].add((feature, value))
for feature, value in unary_strlib_feats[span.stable_id]:
yield candidate.id, FEATURE_PREFIX + prefix + feature, value
if candidate.id not in multinary_strlib_feats:
multinary_strlib_feats[candidate.id] = set()
for feature, value in _strlib_multinary_features(spans):
multinary_strlib_feats[candidate.id].add((feature, value))
for feature, value in multinary_strlib_feats[candidate.id]:
yield candidate.id, FEATURE_PREFIX + feature, value
def _strlib_unary_features(span: SpanMention) -> Iterator[Tuple[str, int]]:
"""Structural-related features for a single span."""
if not span.sentence.is_structural():
return
yield f"TAG_{get_tag(span)}", DEF_VALUE
for attr in get_attributes(span):
yield f"HTML_ATTR_{attr}", DEF_VALUE
yield f"PARENT_TAG_{get_parent_tag(span)}", DEF_VALUE
prev_tags = get_prev_sibling_tags(span)
if len(prev_tags):
yield f"PREV_SIB_TAG_{prev_tags[-1]}", DEF_VALUE
yield f"NODE_POS_{len(prev_tags) + 1}", DEF_VALUE
else:
yield "FIRST_NODE", DEF_VALUE
next_tags = get_next_sibling_tags(span)
if len(next_tags):
yield f"NEXT_SIB_TAG_{next_tags[0]}", DEF_VALUE
else:
yield "LAST_NODE", DEF_VALUE
yield f"ANCESTOR_CLASS_[{' '.join(get_ancestor_class_names(span))}]", DEF_VALUE
yield f"ANCESTOR_TAG_[{' '.join(get_ancestor_tag_names(span))}]", DEF_VALUE
yield f"ANCESTOR_ID_[{' '.join(get_ancestor_id_names(span))}]", DEF_VALUE
def _strlib_multinary_features(
spans: Tuple[SpanMention, ...]
) -> Iterator[Tuple[str, int]]:
"""Structural-related features for multiple spans."""
yield f"COMMON_ANCESTOR_[{' '.join(common_ancestor(spans))}]", DEF_VALUE
yield (
f"LOWEST_ANCESTOR_DEPTH_[" f"{lowest_common_ancestor_depth(spans)}]"
), DEF_VALUE
| fonduer-master | src/fonduer/features/feature_libs/structural_features.py |
"""Fonduer's feature model module."""
from fonduer.features.models.feature import Feature, FeatureKey
__all__ = ["Feature", "FeatureKey"]
| fonduer-master | src/fonduer/features/models/__init__.py |
"""Fonduer feature model."""
from sqlalchemy import Column, Float
from sqlalchemy.dialects import postgresql
from fonduer.meta import Meta
from fonduer.utils.models.annotation import AnnotationKeyMixin, AnnotationMixin
class FeatureKey(AnnotationKeyMixin, Meta.Base):
"""A feature's key that identifies the definition of the Feature."""
pass
class Feature(AnnotationMixin, Meta.Base):
"""An element of a representation of a Candidate in a feature space.
A Feature's annotation key identifies the definition of the Feature, e.g.,
a function that implements it or the library name and feature name in an
automatic featurization library.
"""
#: A list of floating point values for each Key.
values = Column(postgresql.ARRAY(Float), nullable=False)
| fonduer-master | src/fonduer/features/models/feature.py |
"""Fonduer basic config and config utils."""
import logging
import os
from typing import Dict
import yaml
MAX_CONFIG_SEARCH_DEPTH = 25 # Max num of parent directories to look for config
logger = logging.getLogger(__name__)
default = {
"featurization": {
"textual": {
"window_feature": {"size": 3, "combinations": True, "isolated": True},
"word_feature": {"window": 7},
},
"tabular": {
"unary_features": {
"attrib": ["words"],
"get_cell_ngrams": {"max": 2},
"get_head_ngrams": {"max": 2},
"get_row_ngrams": {"max": 2},
"get_col_ngrams": {"max": 2},
},
"multinary_features": {
"min_row_diff": {"absolute": False},
"min_col_diff": {"absolute": False},
},
},
},
"learning": {
"LSTM": {
"emb_dim": 100,
"hidden_dim": 100,
"attention": True,
"dropout": 0.1,
"bidirectional": True,
"bias": False,
},
"LogisticRegression": {"hidden_dim": 100, "bias": False},
},
}
def _merge(x: Dict, y: Dict) -> Dict:
"""Merge two nested dictionaries. Overwrite values in x with values in y."""
merged = {**x, **y}
xkeys = x.keys()
for key in xkeys:
if isinstance(x[key], dict) and key in y:
merged[key] = _merge(x[key], y[key])
return merged
def get_config(path: str = os.getcwd()) -> Dict:
"""Search for settings file in root of project and its parents."""
config = default
tries = 0
current_dir = path
while current_dir and tries < MAX_CONFIG_SEARCH_DEPTH:
potential_path = os.path.join(current_dir, ".fonduer-config.yaml")
if os.path.exists(potential_path):
with open(potential_path, "r") as f:
config = _merge(config, yaml.safe_load(f))
logger.debug(f"Loading Fonduer config from {potential_path}.")
break
new_dir = os.path.split(current_dir)[0]
if current_dir == new_dir:
logger.debug("Unable to find config file. Using defaults.")
break
current_dir = new_dir
tries += 1
return config
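# Editorial sketch (not part of the original module): a project-level
# ".fonduer-config.yaml" found by get_config() is merged over ``default``.
# The file contents below are hypothetical; only the keys shown are overridden.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as project_dir:
        with open(os.path.join(project_dir, ".fonduer-config.yaml"), "w") as f:
            f.write("learning:\n  LSTM:\n    dropout: 0.5\n")
        config = get_config(project_dir)
        assert config["learning"]["LSTM"]["dropout"] == 0.5  # overridden
        assert config["learning"]["LSTM"]["emb_dim"] == 100  # default preserved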
| fonduer-master | src/fonduer/utils/config.py |
"""Fonduer visual utils."""
import warnings
from typing import NamedTuple
class Bbox(NamedTuple):
"""Bounding box."""
page: int
top: int
bottom: int
left: int
right: int
def bbox_from_span(span) -> Bbox: # type: ignore
"""Get bounding box from span.
:param span: The input span.
:return: The bounding box of the span.
"""
warnings.warn(
"bbox_from_span(span) is deprecated. Use span.get_bbox() instead.",
DeprecationWarning,
)
from fonduer.candidates.models.span_mention import TemporarySpanMention # noqa
if isinstance(span, TemporarySpanMention) and span.sentence.is_visual():
return Bbox(
span.get_attrib_tokens("page")[0],
min(span.get_attrib_tokens("top")),
max(span.get_attrib_tokens("bottom")),
min(span.get_attrib_tokens("left")),
max(span.get_attrib_tokens("right")),
)
else:
return None
def bbox_from_sentence(sentence) -> Bbox: # type: ignore
"""Get bounding box from sentence.
:param sentence: The input sentence.
:return: The bounding box of the sentence.
"""
warnings.warn(
"bbox_from_sentence(sentence) is deprecated. Use sentence.get_bbox() instead.",
DeprecationWarning,
)
from fonduer.parser.models import Sentence # noqa
# TODO: this may have issues where a sentence is linked to words on different pages
if isinstance(sentence, Sentence) and sentence.is_visual():
return Bbox(
sentence.page[0],
min(sentence.top),
max(sentence.bottom),
min(sentence.left),
max(sentence.right),
)
else:
return None
def bbox_horz_aligned(box1: Bbox, box2: Bbox) -> bool:
"""Check two bounding boxes are horizontally aligned.
Return true if the vertical center point of either span is within the
vertical range of the other
"""
if not (box1 and box2):
return False
# NEW: any overlap counts
# return box1.top <= box2.bottom and box2.top <= box1.bottom
box1_top = box1.top + 1.5
box2_top = box2.top + 1.5
box1_bottom = box1.bottom - 1.5
box2_bottom = box2.bottom - 1.5
return not (box1_top > box2_bottom or box2_top > box1_bottom)
# return not (box1.top >= box2.bottom or box2.top >= box1.bottom)
# center1 = (box1.bottom + box1.top) / 2.0
# center2 = (box2.bottom + box2.top) / 2.0
# return ((center1 >= box2.top and center1 <= box2.bottom) or
# (center2 >= box1.top and center2 <= box1.bottom))
def bbox_vert_aligned(box1: Bbox, box2: Bbox) -> bool:
"""Check two bounding boxes are vertical aligned.
Return true if the horizontal center point of either span is within the
horizontal range of the other
"""
if not (box1 and box2):
return False
# NEW: any overlap counts
# return box1.left <= box2.right and box2.left <= box1.right
box1_left = box1.left + 1.5
box2_left = box2.left + 1.5
box1_right = box1.right - 1.5
box2_right = box2.right - 1.5
return not (box1_left > box2_right or box2_left > box1_right)
# center1 = (box1.right + box1.left) / 2.0
# center2 = (box2.right + box2.left) / 2.0
# return ((center1 >= box2.left and center1 <= box2.right) or
# (center2 >= box1.left and center2 <= box1.right))
def bbox_vert_aligned_left(box1: Bbox, box2: Bbox) -> bool:
"""Check two boxes' left boundaries are with 2pts.
Return true if the left boundary of both boxes is within 2 pts.
"""
if not (box1 and box2):
return False
return abs(box1.left - box2.left) <= 2
def bbox_vert_aligned_right(box1: Bbox, box2: Bbox) -> bool:
"""Check two boxes' right boundaries are with 2pts.
Return true if the right boundary of both boxes is within 2 pts.
"""
if not (box1 and box2):
return False
return abs(box1.right - box2.right) <= 2
def bbox_vert_aligned_center(box1: Bbox, box2: Bbox) -> bool:
"""Check two boxes' centers are with 5pts.
Return true if the center of both boxes is within 5 pts.
"""
if not (box1 and box2):
return False
return abs(((box1.right + box1.left) / 2.0) - ((box2.right + box2.left) / 2.0)) <= 5
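# Editorial sketch (not part of the original module): the predicates above operate
# on plain ``Bbox`` tuples, so they can be exercised without a parsed document.
# The coordinates below are made up and are in PDF points.
if __name__ == "__main__":
    header = Bbox(page=1, top=100, bottom=112, left=50, right=200)
    value = Bbox(page=1, top=104, bottom=110, left=50, right=80)
    assert bbox_horz_aligned(header, value)  # vertical ranges overlap
    assert bbox_vert_aligned_left(header, value)  # left edges within 2 pts
    assert not bbox_vert_aligned_right(header, value)  # right edges far apart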
| fonduer-master | src/fonduer/utils/utils_visual.py |
"""Fonduer tabular utils."""
import itertools
from builtins import range
from functools import lru_cache
from typing import List, Optional, Tuple, Union
from fonduer.parser.models.sentence import Sentence
from fonduer.parser.models.table import Cell
@lru_cache(maxsize=1024)
def _min_range_diff(coordinates: Tuple[Tuple[int, int]], absolute: bool = True) -> int:
"""Get the minimum range difference.
# Using Tuple instead of list because list is unhashable with `lru_cache`
# if absolute=True, return the absolute value of minimum magnitude difference
# if absolute=False, return the raw value of minimum magnitude difference
# TODO: move back to efficient implementation once it sees that
# min_range_diff(3,3,2,3) = 0 return max(0, max(a_end - b_start, b_end -
# a_start))
:param coordinates: A tuple of a couple (start, end) indexes of the objects.
:param absolute: Whether use absolute value, defaults to True.
:return: The minimum range difference.
"""
f = lambda x: (abs(x) if absolute else x)
return min(
[
f(min([x - y for x, y in zip(ii[:-1], ii[1:])], key=abs))
for ii in itertools.product(
*[range(start, end + 1) for start, end in coordinates]
)
],
key=abs,
)
def min_row_diff(cells: List[Union[Cell, Sentence]], absolute: bool = True) -> int:
"""Get the minimum row difference of two sentences or cells.
:param cells: The list of cells or sentences.
    :param absolute: Whether to use the absolute value, defaults to True.
:return: The minimum row difference.
"""
coordinates = [(cell.row_start, cell.row_end) for cell in cells]
return _min_range_diff(tuple(coordinates), absolute=absolute)
def min_col_diff(cells: List[Union[Cell, Sentence]], absolute: bool = True) -> int:
"""Get the minimum column difference of two sentences or cells.
:param cells: The list of cells or sentences.
    :param absolute: Whether to use the absolute value, defaults to True.
:return: The minimum column difference.
"""
coordinates = [(cell.col_start, cell.col_end) for cell in cells]
return _min_range_diff(tuple(coordinates), absolute=absolute)
def min_axis_diff(
a: Union[Cell, Sentence],
b: Union[Cell, Sentence],
axis: Optional[str] = None,
absolute: bool = True,
) -> int:
"""Get the minimum axis difference of two sentences or cells.
:param a: The first cell or sentence.
:param b: The second cell or sentence.
    :param axis: The axis ("row" or "col") along which to calculate the difference;
        if None, the minimum over both axes is returned. Defaults to None.
    :param absolute: Whether to use the absolute value, defaults to True.
    :return: The minimum axis difference.
"""
if axis == "row":
return min_row_diff([a, b], absolute)
elif axis == "col":
return min_col_diff([a, b], absolute)
else:
return min(min_row_diff([a, b], absolute), min_col_diff([a, b], absolute))
def is_row_aligned(
a: Union[Cell, Sentence], b: Union[Cell, Sentence], spread: List[int] = [0, 0]
) -> bool:
"""Check two sentences or cells are row-wise aligned.
:param a: The first cell or sentence.
:param b: The second cell or sentence.
:param spread: Row difference range, defaults to [0, 0].
:return: Return True if two sentences or cells are row-wise aligned.
"""
return min_row_diff([a, b]) in range(spread[0], spread[1] + 1)
def is_col_aligned(
a: Union[Sentence, Cell], b: Union[Cell, Sentence], spread: List[int] = [0, 0]
) -> bool:
"""Check two sentences or cells are column-wise aligned.
:param a: The first cell or sentence.
:param b: The second cell or sentence.
:param spread: Column difference range, defaults to [0, 0].
:return: Return True if two sentences or cells are column-wise aligned.
"""
return min_col_diff([a, b]) in range(spread[0], spread[1] + 1)
def is_axis_aligned(
a: Union[Cell, Sentence],
b: Union[Cell, Sentence],
axis: Optional[str] = None,
spread: List[int] = [0, 0],
) -> bool:
"""Check two sentences or cells are axis-wise aligned.
:param a: The first cell or sentence.
:param b: The second cell or sentence.
:param axis: The axis to calculate the alignment, defaults to None.
:param spread: Row/column difference range, defaults to [0, 0].
:return: Return True if two sentences or cells are axis-wise aligned.
"""
if axis == "row":
return is_row_aligned(a, b, spread=spread)
elif axis == "col":
return is_col_aligned(a, b, spread=spread)
else:
return is_row_aligned(a, b, spread=spread) or is_col_aligned(
a, b, spread=spread
)
def num_rows(a: Union[Cell, Sentence]) -> int:
"""Get number of rows that sentence or cell spans.
:param a: The cell or sentence.
:return: The number of rows that sentence or cell spans.
"""
    return a.row_end - a.row_start + 1
def num_cols(a: Union[Cell, Sentence]) -> int:
"""Get number of columns that sentence or cell spans.
:param a: The cell or sentence.
:return: The number of columns that sentence or cell spans.
"""
    return a.col_end - a.col_start + 1
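# Editorial sketch (not part of the original module): the alignment helpers only
# read row_start/row_end/col_start/col_end, so a lightweight stand-in is enough
# here; in practice they receive ``Cell`` or ``Sentence`` ORM objects.
if __name__ == "__main__":
    from collections import namedtuple
    FakeCell = namedtuple("FakeCell", ["row_start", "row_end", "col_start", "col_end"])
    a = FakeCell(row_start=0, row_end=0, col_start=1, col_end=2)
    b = FakeCell(row_start=2, row_end=3, col_start=2, col_end=2)
    assert min_row_diff([a, b]) == 2  # closest rows are 0 and 2
    assert min_col_diff([a, b]) == 0  # both span column 2
    assert is_col_aligned(a, b) and not is_row_aligned(a, b)
    assert is_axis_aligned(a, b)  # column alignment alone is enough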
| fonduer-master | src/fonduer/utils/utils_table.py |
"""Fonduer's utils module."""
| fonduer-master | src/fonduer/utils/__init__.py |
"""Fonduer parser utils."""
from typing import List, Optional, Tuple
def build_node(type: str, name: str, content: str) -> str:
"""
    Wrap content up into an HTML node.
    :param type: content type (e.g., doc, section, text, figure)
    :param name: content name (e.g., the name of the section)
    :param content: the actual content
    :return: a new string with the content in HTML format
"""
if type == "doc":
return f"<html>{content}</html>"
if type == "section":
return f"<section name='{name}'>{content}</section>"
if type == "text":
return f"<p name='{name}'>{content}</p>"
if type == "figure":
return f"<img name='{name}' src='{content}'/>"
raise RuntimeError("unknown type")
def column_constructor(
text: str,
name: Optional[str] = None,
type: str = "text",
delim: Optional[str] = None,
) -> List[Tuple[str, str, str]]:
"""Column constructor.
    Convert raw content to a list of structured tuples, where each tuple contains
    (type, name, content).
    :param text: content to be converted
    :param name: content name (default: None)
    :param type: content type (default: text)
    :param delim: delimiter used to split the content
    :return: A list of tuples, where each tuple contains
        (content type, content name, content)
"""
if delim is None:
return [(type, name, text)]
return [(type, name, content) for content in text.split(delim)]
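# Editorial sketch (not part of the original module): splitting one raw blob into
# two "text" nodes and wrapping a section in the lightweight HTML representation.
if __name__ == "__main__":
    rows = column_constructor("First paragraph.\nSecond.", name="body", delim="\n")
    assert rows == [("text", "body", "First paragraph."), ("text", "body", "Second.")]
    html = build_node("section", "intro", "<p name='p1'>First paragraph.</p>")
    assert html == "<section name='intro'><p name='p1'>First paragraph.</p></section>"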
| fonduer-master | src/fonduer/utils/utils_parser.py |
"""Fonduer UDF utils."""
import logging
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
)
import numpy as np
from scipy.sparse import csr_matrix
from sqlalchemy import String, Table
from sqlalchemy.dialects.postgresql import ARRAY, insert
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import cast
from fonduer.candidates.models import Candidate
from fonduer.parser.models import Document
from fonduer.utils.models import AnnotationMixin
logger = logging.getLogger(__name__)
# Flag value to signal that no filtering on split should be applied. Not an
# integer to ensure that it won't conflict with a user's split value.
ALL_SPLITS = "ALL"
def _get_cand_values(candidate: Candidate, key_table: Table) -> List[AnnotationMixin]:
"""Get the corresponding values for the key_table."""
# NOTE: Import just before checking to avoid circular imports.
from fonduer.features.models import FeatureKey
from fonduer.supervision.models import GoldLabelKey, LabelKey
if key_table == FeatureKey:
return candidate.features
elif key_table == LabelKey:
return candidate.labels
elif key_table == GoldLabelKey:
return candidate.gold_labels
else:
raise ValueError(f"{key_table} is not a valid key table.")
def _batch_postgres_query(
table: Table, records: List[Dict[str, Any]]
) -> Iterator[List[Dict[str, Any]]]:
"""Break the list into chunks that can be processed as a single statement.
Postgres query cannot be too long or it will fail.
    See: https://dba.stackexchange.com/questions/131399/is-there-a-maximum-length-constraint-for-a-postgres-query
:param records: The full list of records to batch.
:param table: The sqlalchemy table.
:return: A generator of lists of records.
"""
if not records:
return
POSTGRESQL_MAX = 0x3FFFFFFF
# Create preamble and measure its length
preamble = (
"INSERT INTO "
+ table.__tablename__
+ " ("
+ ", ".join(records[0].keys())
+ ") VALUES ("
+ ", ".join(["?"] * len(records[0].keys()))
+ ")\n"
)
start = 0
end = 0
total_len = len(preamble)
while end < len(records):
record_len = sum([len(str(v)) for v in records[end].values()])
# Pre-increment to include the end element in the slice
end += 1
if total_len + record_len >= POSTGRESQL_MAX:
logger.debug(f"Splitting query due to length ({total_len} chars).")
yield records[start:end]
start = end
# Reset the total query length
total_len = len(preamble)
else:
total_len += record_len
yield records[start:end]
def get_sparse_matrix_keys(session: Session, key_table: Table) -> List:
"""Return a list of keys for the sparse matrix."""
return session.query(key_table).order_by(key_table.name).all()
def batch_upsert_records(
session: Session, table: Table, records: List[Dict[str, Any]]
) -> None:
"""Batch upsert records into postgresql database."""
if not records:
return
for record_batch in _batch_postgres_query(table, records):
stmt = insert(table.__table__)
stmt = stmt.on_conflict_do_update(
constraint=table.__table__.primary_key,
set_={
"keys": stmt.excluded.get("keys"),
"values": stmt.excluded.get("values"),
},
)
session.execute(stmt, record_batch)
session.commit()
def get_sparse_matrix(
session: Session,
key_table: Table,
cand_lists: Union[Sequence[Candidate], Iterable[Sequence[Candidate]]],
key: Optional[str] = None,
) -> List[csr_matrix]:
"""Load sparse matrix of GoldLabels for each candidate_class."""
result = []
cand_lists = cand_lists if isinstance(cand_lists, (list, tuple)) else [cand_lists]
for cand_list in cand_lists:
if len(cand_list) == 0:
raise ValueError("cand_lists contain empty cand_list.")
# Keys are used as a global index
if key:
key_names = [key]
else:
# Get all keys
all_keys = get_sparse_matrix_keys(session, key_table)
# Filter only keys that are used by this cand_list
key_names = [k.name for k in all_keys]
annotations: List[Dict[str, Any]] = []
for cand in cand_list:
annotation_mixins: List[AnnotationMixin] = _get_cand_values(cand, key_table)
if annotation_mixins:
annotations.append(
{
"keys": annotation_mixins[0].keys,
"values": annotation_mixins[0].values,
}
)
else:
annotations.append({"keys": [], "values": []})
result.append(_convert_mappings_to_matrix(annotations, key_names))
return result
def _convert_mappings_to_matrix(
mappings: List[Dict[str, Any]], keys: List[str]
) -> csr_matrix:
"""Convert a list of (annotation) mapping into a sparse matrix.
An annotation mapping is a dictionary representation of annotations like instances
of :class:`Label` and :class:`Feature`. For example, label.keys and label.values
corresponds to annotation["keys"] and annotation["values"].
Note that :func:`FeaturizerUDF.apply` returns a list of list of such a mapping,
where the outer list represents candidate_classes, while this method takes a list
of a mapping of each candidate_class.
:param mappings: a list of annotation mapping.
:param keys: a list of keys, which becomes columns of the matrix to be returned.
"""
    # Create a mapping that maps key_name to column index
keys_map = {key: keys.index(key) for key in keys}
indptr = [0]
indices = []
data = []
for mapping in mappings:
if mapping:
for key, value in zip(mapping["keys"], mapping["values"]):
if key in keys_map:
indices.append(keys_map[key])
data.append(value)
indptr.append(len(indices))
return csr_matrix((data, indices, indptr), shape=(len(mappings), len(keys)))
def unshift_label_matrix(L_sparse: csr_matrix) -> np.ndarray:
"""Unshift a sparse label matrix (ABSTAIN as 0) to a dense one (ABSTAIN as -1)."""
return L_sparse.toarray() - 1
def shift_label_matrix(L: np.ndarray) -> csr_matrix:
"""Shift a dense label matrix (ABSTAIN as -1) to a sparse one (ABSTAIN as 0)."""
return csr_matrix(L + 1)
def get_docs_from_split(
session: Session, candidate_classes: Iterable[Type[Candidate]], split: int
) -> Set[Document]:
"""Return a list of documents that contain the candidates in the split."""
# Only grab the docs containing candidates from the given split.
sub_query = session.query(Candidate.id).filter(Candidate.split == split).subquery()
split_docs: Set[Document] = set()
for candidate_class in candidate_classes:
split_docs.update(
cand.document
for cand in session.query(candidate_class)
.filter(candidate_class.id.in_(sub_query))
.all()
)
return split_docs
def get_mapping(
table: Table,
candidates: Iterable[Candidate],
generator: Callable[[Candidate], Iterator[Tuple]],
) -> Iterator[Dict[str, Any]]:
"""Generate map of keys and values for the candidate from the generator.
:param table: The table we will be inserting into (i.e. Feature or Label).
:param candidates: The candidates to get mappings for.
:param generator: A generator yielding (candidate_id, key, value) tuples.
:return: Generator of dictionaries of {"candidate_id": _, "keys": _, "values": _}
"""
for cand in candidates:
# Grab the old values
if len(getattr(cand, table.__tablename__ + "s")) != 0:
temp = getattr(cand, table.__tablename__ + "s")[0]
cand_map = dict(zip(temp.keys, temp.values))
else:
cand_map = {}
for cid, key, value in generator(cand):
if value == 0:
# Make sure this key does not exist in cand_map
cand_map.pop(key, None)
continue
cand_map[key] = value
# Assemble label arguments
yield {
"candidate_id": cand.id,
"keys": [*cand_map.keys()],
"values": [*cand_map.values()],
}
def drop_all_keys(
session: Session, key_table: Table, candidate_classes: Iterable[Type[Candidate]]
) -> None:
"""Bulk drop annotation keys for all the candidate_classes in the table.
Rather than directly dropping the keys, this removes the candidate_classes
specified for the given keys only. If all candidate_classes are removed for
a key, the key is dropped.
:param key_table: The sqlalchemy class to insert into.
:param candidate_classes: A list of candidate classes to drop.
"""
if not candidate_classes:
return
set_of_candidate_classes: Set[str] = set(
[c.__tablename__ for c in candidate_classes]
)
# Select all rows that contain ANY of the candidate_classes
all_rows = (
session.query(key_table)
.filter(
key_table.candidate_classes.overlap(
cast(set_of_candidate_classes, ARRAY(String))
)
)
.all()
)
to_delete = set()
to_update = []
# All candidate classes will be the same for all keys, so just look at one
for row in all_rows:
# Remove the selected candidate_classes. If empty, mark for deletion.
row.candidate_classes = list(
set(row.candidate_classes) - set_of_candidate_classes
)
if len(row.candidate_classes) == 0:
to_delete.add(row.name)
else:
to_update.append(
{"name": row.name, "candidate_classes": row.candidate_classes}
)
# Perform all deletes
if to_delete:
query = session.query(key_table).filter(key_table.name.in_(to_delete))
query.delete(synchronize_session="fetch")
# Perform all updates
if to_update:
for batch in _batch_postgres_query(key_table, to_update):
stmt = insert(key_table.__table__)
stmt = stmt.on_conflict_do_update(
constraint=key_table.__table__.primary_key,
set_={
"name": stmt.excluded.get("name"),
"candidate_classes": stmt.excluded.get("candidate_classes"),
},
)
session.execute(stmt, batch)
session.commit()
def drop_keys(session: Session, key_table: Table, keys: Dict) -> None:
"""Bulk drop annotation keys to the specified table.
Rather than directly dropping the keys, this removes the candidate_classes
specified for the given keys only. If all candidate_classes are removed for
a key, the key is dropped.
:param key_table: The sqlalchemy class to insert into.
:param keys: A map of {name: [candidate_classes]}.
"""
# Do nothing if empty
if not keys:
return
for key_batch in _batch_postgres_query(
key_table, [{"name": k[0], "candidate_classes": k[1]} for k in keys.items()]
):
all_rows = (
session.query(key_table)
.filter(key_table.name.in_([key["name"] for key in key_batch]))
.all()
)
to_delete = set()
to_update = []
# All candidate classes will be the same for all keys, so just look at one
candidate_classes = key_batch[0]["candidate_classes"]
for row in all_rows:
# Remove the selected candidate_classes. If empty, mark for deletion.
row.candidate_classes = list(
set(row.candidate_classes) - set(candidate_classes)
)
if len(row.candidate_classes) == 0:
to_delete.add(row.name)
else:
to_update.append(
{"name": row.name, "candidate_classes": row.candidate_classes}
)
# Perform all deletes
if to_delete:
query = session.query(key_table).filter(key_table.name.in_(to_delete))
query.delete(synchronize_session="fetch")
# Perform all updates
if to_update:
stmt = insert(key_table.__table__)
stmt = stmt.on_conflict_do_update(
constraint=key_table.__table__.primary_key,
set_={
"name": stmt.excluded.get("name"),
"candidate_classes": stmt.excluded.get("candidate_classes"),
},
)
session.execute(stmt, to_update)
session.commit()
def upsert_keys(session: Session, key_table: Table, keys: Dict) -> None:
"""Bulk add annotation keys to the specified table.
:param key_table: The sqlalchemy class to insert into.
:param keys: A map of {name: [candidate_classes]}.
"""
# Do nothing if empty
if not keys:
return
for key_batch in _batch_postgres_query(
key_table, [{"name": k[0], "candidate_classes": k[1]} for k in keys.items()]
):
stmt = insert(key_table.__table__)
stmt = stmt.on_conflict_do_update(
constraint=key_table.__table__.primary_key,
set_={
"name": stmt.excluded.get("name"),
"candidate_classes": stmt.excluded.get("candidate_classes"),
},
)
while True:
try:
session.execute(stmt, key_batch)
session.commit()
break
except Exception as e:
logger.debug(e)
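# Editorial sketch (not part of the original module): converting per-candidate
# annotation mappings (the ``keys``/``values`` columns) into a sparse matrix and
# shifting it to the dense ABSTAIN=-1 convention. The key names are made up.
if __name__ == "__main__":
    mappings = [
        {"keys": ["LF_a", "LF_b"], "values": [1, 2]},
        {"keys": ["LF_b"], "values": [1]},
    ]
    L_sparse = _convert_mappings_to_matrix(mappings, ["LF_a", "LF_b", "LF_c"])
    assert L_sparse.shape == (2, 3)
    assert unshift_label_matrix(L_sparse).tolist() == [[0, 1, -1], [-1, 0, -1]]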
| fonduer-master | src/fonduer/utils/utils_udf.py |
"""Fonduer utils."""
import re
from builtins import range
from typing import TYPE_CHECKING, Dict, Iterator, List, Set, Tuple, Type, Union
from fonduer.parser.models import Context, Document, Sentence
if TYPE_CHECKING: # to prevent circular imports
from fonduer.candidates.models import Candidate
def camel_to_under(name: str) -> str:
"""
Convert camel-case string to lowercase string separated by underscores.
Written by epost (http://stackoverflow.com/questions/1175208).
:param name: String to be converted
:return: new String with camel-case converted to lowercase, underscored
"""
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
def get_as_dict(x: Union[Dict, Sentence]) -> Dict:
"""Return an object as a dictionary of its attributes."""
if isinstance(x, dict):
return x
else:
try:
return x._asdict()
except AttributeError:
return x.__dict__
def tokens_to_ngrams(
tokens: List[str],
n_min: int = 1,
n_max: int = 3,
delim: str = " ",
lower: bool = False,
) -> Iterator[str]:
"""Get n-grams from tokens."""
f = (lambda x: x.lower()) if lower else (lambda x: x)
N = len(tokens)
for root in range(N):
for n in range(max(n_min - 1, 0), min(n_max, N - root)):
yield f(delim.join(tokens[root : root + n + 1]))
def get_set_of_stable_ids(
doc: Document, candidate_class: "Type[Candidate]"
) -> Set[Tuple[str, ...]]:
"""Return a set of stable_ids of candidates.
A stable_id of a candidate is a tuple of stable_id of the constituent context.
"""
set_of_stable_ids = set()
# "s" is required due to the relationship between Document and candidate_class.
if hasattr(doc, candidate_class.__tablename__ + "s"):
set_of_stable_ids.update(
set(
[
tuple(m.context.get_stable_id() for m in c) if c else None
for c in getattr(doc, candidate_class.__tablename__ + "s")
]
)
)
return set_of_stable_ids
def get_dict_of_stable_id(doc: Document) -> Dict[str, Context]:
"""Return a mapping of a stable_id to its context."""
return {
doc.stable_id: doc,
**{
c.stable_id: c
for a in [
"sentences",
"paragraphs",
"captions",
"cells",
"tables",
"sections",
"figures",
]
for c in getattr(doc, a)
},
**{
c.stable_id: c
for s in doc.sentences
for a in ["spans", "implicit_spans"]
for c in getattr(s, a)
},
}
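# Editorial sketch (not part of the original module): camel_to_under and
# tokens_to_ngrams are pure functions and can be exercised directly.
if __name__ == "__main__":
    assert camel_to_under("ParagraphMention") == "paragraph_mention"
    assert list(tokens_to_ngrams(["word", "per", "token"], n_min=1, n_max=2)) == [
        "word",
        "word per",
        "per",
        "per token",
        "token",
    ]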
| fonduer-master | src/fonduer/utils/utils.py |
"""Fonduer UDF."""
import logging
from multiprocessing import Manager, Process
from queue import Queue
from threading import Thread
from typing import Any, Collection, Dict, List, Optional, Set, Type, Union
from sqlalchemy import inspect
from sqlalchemy.orm import Session, scoped_session, sessionmaker
from fonduer.meta import Meta, new_sessionmaker
from fonduer.parser.models.document import Document
try:
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config:
raise ImportError("console")
except (AttributeError, ImportError):
from tqdm import tqdm
else:
from tqdm.notebook import tqdm
logger = logging.getLogger(__name__)
class UDFRunner(object):
"""Class to run UDFs in parallel using simple queue-based multiprocessing setup."""
def __init__(
self,
session: Session,
udf_class: Type["UDF"],
parallelism: int = 1,
**udf_init_kwargs: Any,
) -> None:
"""Initialize UDFRunner."""
self.udf_class = udf_class
self.udf_init_kwargs = udf_init_kwargs
self.udfs: List["UDF"] = []
self.pb = None
self.session = session
self.parallelism = parallelism
#: The last set of documents that apply() was called on
self.last_docs: Set[str] = set()
def apply(
self,
doc_loader: Collection[
Document
], # doc_loader has __len__, but Iterable doesn't.
clear: bool = True,
parallelism: Optional[int] = None,
progress_bar: bool = True,
**kwargs: Any,
) -> None:
"""Apply the given UDF to the set of objects returned by the doc_loader.
Either single or multi-threaded, and optionally calling clear() first.
"""
# Clear everything downstream of this UDF if requested
if clear:
self.clear(**kwargs)
# Execute the UDF
logger.info("Running UDF...")
# Setup progress bar
if progress_bar:
logger.debug("Setting up progress bar...")
if hasattr(doc_loader, "__len__"):
self.pb = tqdm(total=len(doc_loader))
else:
logger.error("Could not determine size of progress bar")
# Use the parallelism of the class if none is provided to apply
parallelism = parallelism if parallelism else self.parallelism
self._apply(doc_loader, parallelism, clear=clear, **kwargs)
# Close progress bar
if self.pb is not None:
logger.debug("Closing progress bar...")
self.pb.close()
logger.debug("Running after_apply...")
self._after_apply(**kwargs)
def clear(self, **kwargs: Any) -> None:
"""Clear the associated data from the database."""
raise NotImplementedError()
def _after_apply(self, **kwargs: Any) -> None:
"""Execute this method by a single process after apply."""
pass
def _add(self, session: Session, instance: Any) -> None:
pass
def _apply(
self, doc_loader: Collection[Document], parallelism: int, **kwargs: Any
) -> None:
"""Run the UDF multi-threaded using python multiprocessing."""
if not Meta.postgres:
raise ValueError("Fonduer must use PostgreSQL as a database backend.")
# Create an input queue to feed documents to UDF workers
manager = Manager()
# Set maxsize (#435). The number is heuristically determined.
in_queue = manager.Queue(maxsize=parallelism * 2)
# Use an output queue to track multiprocess progress
out_queue = manager.Queue()
# Clear the last documents parsed by the last run
self.last_docs = set()
# Create DB session factory for insert data on each UDF (#545)
session_factory = new_sessionmaker()
# Create UDF Processes
for i in range(parallelism):
udf = self.udf_class(
session_factory=session_factory,
runner=self,
in_queue=in_queue,
out_queue=out_queue,
worker_id=i,
**self.udf_init_kwargs,
)
udf.apply_kwargs = kwargs
self.udfs.append(udf)
# Start the UDF processes
for udf in self.udfs:
udf.start()
# Fill input queue with documents but # of docs in queue is capped (#435).
def in_thread_func() -> None:
# Do not use session here to prevent concurrent use (#482).
for doc in doc_loader:
in_queue.put(doc) # block until a free slot is available
Thread(target=in_thread_func).start()
count_parsed = 0
total_count = len(doc_loader)
while (
any([udf.is_alive() for udf in self.udfs]) or not out_queue.empty()
) and count_parsed < total_count:
# Get doc from the out_queue and persist the result into postgres
try:
doc_name = out_queue.get() # block until an item is available
self.last_docs.add(doc_name)
# Update progress bar whenever an item has been processed
count_parsed += 1
if self.pb is not None:
self.pb.update(1)
except Exception as e:
# Raise an error for all the other exceptions.
raise (e)
# Join the UDF processes
for _ in self.udfs:
in_queue.put(UDF.TASK_DONE)
for udf in self.udfs:
udf.join()
# Flush the processes
self.udfs = []
class UDF(Process):
"""UDF class."""
TASK_DONE = "done"
def __init__(
self,
session_factory: sessionmaker = None,
runner: UDFRunner = None,
in_queue: Optional[Queue] = None,
out_queue: Optional[Queue] = None,
worker_id: int = 0,
**udf_init_kwargs: Any,
) -> None:
"""Initialize UDF.
:param in_queue: A Queue of input objects to processes
:param out_queue: A Queue of output objects from processes
:param worker_id: An ID of a process
"""
super().__init__()
self.daemon = True
self.session_factory = session_factory
self.runner = runner
self.in_queue = in_queue
self.out_queue = out_queue
self.worker_id = worker_id
# We use a workaround to pass in the apply kwargs
self.apply_kwargs: Dict[str, Any] = {}
def run(self) -> None:
"""Run function of UDF.
        Call this method when the UDF is run as a Process in a multiprocessing
        setting. The basic routine is: get a document from the input queue,
        apply, put / add the outputs, and loop.
"""
        # Each UDF gets a thread-local (scoped) session from the connection pool.
        # See the SQLAlchemy docs on using scoped sessions with multiprocessing.
Session = scoped_session(self.session_factory)
session = Session()
while True:
doc = self.in_queue.get() # block until an item is available
if doc == UDF.TASK_DONE:
break
# Merge the object with the session owned by the current child process.
# This does not happen during parsing when doc is transient.
if not inspect(doc).transient:
doc = session.merge(doc, load=False)
y = self.apply(doc, **self.apply_kwargs)
self.runner._add(session, y)
self.out_queue.put(doc.name)
session.commit()
session.close()
Session.remove()
def apply(
self, doc: Document, **kwargs: Any
) -> Union[Document, None, List[List[Dict[str, Any]]]]:
"""Apply function.
This function takes in an object, and returns a generator / set / list.
"""
raise NotImplementedError()
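# Editorial sketch (not part of the original module) of the subclassing contract
# used by Fonduer's operators (Parser, MentionExtractor, Labeler, ...): a UDF
# implements ``apply`` for one Document, and a UDFRunner wires clearing,
# parallelism, and persistence around it. Both classes below are hypothetical.
class SentenceCountUDF(UDF):
    """Toy UDF that maps a Document to its sentence count."""
    def apply(self, doc: Document, **kwargs: Any) -> Any:
        return {"name": doc.name, "n_sentences": len(doc.sentences)}
class SentenceCountRunner(UDFRunner):
    """Toy runner; ``_add`` decides how each worker's result is handled."""
    def __init__(self, session: Session, parallelism: int = 1) -> None:
        super().__init__(session, SentenceCountUDF, parallelism=parallelism)
    def clear(self, **kwargs: Any) -> None:
        pass  # this toy operator writes nothing, so there is nothing to clear
    def _add(self, session: Session, result: Any) -> None:
        logger.info(f"{result['name']}: {result['n_sentences']} sentences")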
| fonduer-master | src/fonduer/utils/udf.py |
"""Fonduer visualizer."""
import logging
import os
import subprocess
import warnings
from builtins import object
from collections import defaultdict
from typing import DefaultDict, List, Optional, Tuple
from bs4 import BeautifulSoup
from IPython.display import DisplayHandle, display
from wand.color import Color
from wand.drawing import Drawing
from wand.image import Image
from fonduer.candidates.models import Candidate, SpanMention
from fonduer.parser.models import Sentence
from fonduer.utils.utils_visual import Bbox
logger = logging.getLogger(__name__)
class Visualizer(object):
"""Object to display bounding boxes on a pdf document."""
def __init__(self, pdf_path: str) -> None:
"""Initialize Visualizer.
:param pdf_path: directory where documents are stored
:return:
"""
self.pdf_path = pdf_path
def display_boxes(
self,
pdf_file: str,
boxes: List[Bbox],
alternate_colors: bool = False,
) -> List[Image]:
"""Display bounding boxes on the document.
        Display each of the bounding boxes passed in 'boxes' on images of the pdf
        pointed to by pdf_file.
        boxes is a list of Bbox 5-tuples (page, top, bottom, left, right).
"""
imgs = []
with Color("blue") as blue, Color("red") as red, Color(
"rgba(0, 0, 0, 0.0)"
) as transparent:
colors = [blue, red]
boxes_per_page: DefaultDict[int, int] = defaultdict(int)
boxes_by_page: DefaultDict[
int, List[Tuple[int, int, int, int]]
] = defaultdict(list)
for i, (page, top, bottom, left, right) in enumerate(boxes):
boxes_per_page[page] += 1
boxes_by_page[page].append((top, bottom, left, right))
for i, page_num in enumerate(boxes_per_page.keys()):
with Drawing() as draw:
img = pdf_to_img(pdf_file, page_num)
draw.fill_color = transparent
for j, (top, bottom, left, right) in enumerate(
boxes_by_page[page_num]
):
draw.stroke_color = (
colors[j % 2] if alternate_colors else colors[0]
)
draw.rectangle(left=left, top=top, right=right, bottom=bottom)
draw(img)
imgs.append(img)
return imgs
def display_candidates(
self, candidates: List[Candidate], pdf_file: Optional[str] = None
) -> DisplayHandle:
"""Display the bounding boxes of candidates.
        Display the bounding boxes corresponding to candidates on an image of the pdf.
        The boxes are Bbox 5-tuples (page, top, bottom, left, right).
"""
if not pdf_file:
pdf_file = os.path.join(self.pdf_path, candidates[0].document.name)
if os.path.isfile(pdf_file + ".pdf"):
pdf_file += ".pdf"
elif os.path.isfile(pdf_file + ".PDF"):
pdf_file += ".PDF"
else:
logger.error("display_candidates failed: pdf file missing.")
boxes = [m.context.get_bbox() for c in candidates for m in c.get_mentions()]
imgs = self.display_boxes(pdf_file, boxes, alternate_colors=True)
return display(*imgs)
def display_words(
self,
sentences: List[Sentence],
target: Optional[str] = None,
pdf_file: Optional[str] = None,
) -> DisplayHandle:
"""Display the bounding boxes of words.
Display the bounding boxes corresponding to words on the pdf.
"""
if not pdf_file:
pdf_file = os.path.join(self.pdf_path, sentences[0].document.name + ".pdf")
boxes = []
for sentence in sentences:
for i, word in enumerate(sentence.words):
if target is None or word == target:
boxes.append(
Bbox(
sentence.page[i],
sentence.top[i],
sentence.bottom[i],
sentence.left[i],
sentence.right[i],
)
)
imgs = self.display_boxes(pdf_file, boxes)
return display(*imgs)
def get_box(span: SpanMention) -> Bbox:
"""Get the bounding box."""
warnings.warn(
"get_box(span) is deprecated. Use span.get_bbox() instead.",
DeprecationWarning,
)
return Bbox(
min(span.get_attrib_tokens("page")),
min(span.get_attrib_tokens("top")),
max(span.get_attrib_tokens("bottom")),
min(span.get_attrib_tokens("left")),
max(span.get_attrib_tokens("right")),
)
def get_pdf_dim(pdf_file: str, page: int = 1) -> Tuple[int, int]:
"""Get the dimension of a pdf.
:param pdf_file: path to the pdf file
:param page: page number (starting from 1) to get a dimension for
:return: width, height
"""
html_content = subprocess.check_output(
f"pdftotext -f {page} -l {page} -bbox '{pdf_file}' -", shell=True
)
soup = BeautifulSoup(html_content, "html.parser")
pages = soup.find_all("page")
page_width, page_height = (
int(float(pages[0].get("width"))),
int(float(pages[0].get("height"))),
)
return page_width, page_height
def pdf_to_img(
pdf_file: str, page_num: int, pdf_dim: Optional[Tuple[int, int]] = None
) -> Image:
"""Convert pdf file into image.
:param pdf_file: path to the pdf file
:param page_num: page number to convert (index starting at 1)
:return: wand image object
"""
if not pdf_dim:
pdf_dim = get_pdf_dim(pdf_file)
page_width, page_height = pdf_dim
img = Image(filename=f"{pdf_file}[{page_num - 1}]")
img.resize(page_width, page_height)
return img
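# Editorial usage sketch (not part of the original module). It assumes an existing
# Fonduer pipeline: ``train_cands`` from CandidateExtractor.get_candidates(),
# parsed ``sentences``, and a hypothetical "data/pdf/" directory holding PDFs
# named after each Document. Shown as comments because it needs those artifacts.
#
#     vis = Visualizer("data/pdf/")
#     vis.display_candidates(train_cands[0][:5])  # draw each mention's bounding box
#     vis.display_words(sentences, target="VOLTAGE")  # highlight one word's boxes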
| fonduer-master | src/fonduer/utils/visualizer.py |
"""Fonduer annotation model."""
from typing import Tuple
from sqlalchemy import Column, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import backref, relationship
from fonduer.utils.utils import camel_to_under
class AnnotationKeyMixin(object):
"""Mixin class for defining annotation key tables.
An AnnotationKey is the unique name associated with a set of Annotations,
corresponding e.g. to a single labeling function or feature.
"""
__name__: str # declare for mypy
@declared_attr
def __tablename__(cls) -> str:
"""Get the table name."""
return camel_to_under(cls.__name__)
@declared_attr
def name(cls) -> Column:
"""Name of the Key."""
return Column(String, primary_key=True)
@declared_attr
def candidate_classes(cls) -> Column:
"""List of strings of each Key name."""
return Column(postgresql.ARRAY(String), nullable=False)
@declared_attr
def __table_args__(cls) -> Tuple[UniqueConstraint]:
"""Get the table args."""
return (UniqueConstraint("name"),)
def __repr__(self) -> str:
"""Represent the annotation key as a string."""
return f"{self.__class__.__name__} ({self.name})"
class AnnotationMixin(object):
"""Mixin class for defining annotation tables.
An annotation is a value associated with a Candidate. Examples include
labels, features, and predictions. New types of annotations can be defined
by creating an annotation class and corresponding annotation, for example:
.. code-block:: python
from fonduer.utils.models import AnnotationMixin
from fonduer.meta import Meta
class NewAnnotation(AnnotationMixin, Meta.Base):
values = Column(Float, nullable=False)
The annotation class should include a Column attribute named values.
"""
__name__: str # declare for mypy
values: Column # declare for mypy
@declared_attr
def __tablename__(cls) -> str:
"""Get the table name."""
return camel_to_under(cls.__name__)
# The key is the "name" or "type" of the Annotation- e.g. the name of a
# feature, lf, or of a human annotator
@declared_attr
def keys(cls) -> Column:
"""List of strings of each Key name."""
return Column(postgresql.ARRAY(String), nullable=False)
# Every annotation is with respect to a candidate
@declared_attr
def candidate_id(cls) -> Column:
"""Id of the ``Candidate`` being annotated."""
return Column(
"candidate_id",
Integer,
ForeignKey("candidate.id", ondelete="CASCADE"),
primary_key=True,
)
@declared_attr
def candidate(cls) -> relationship:
"""``Candidate``."""
return relationship(
"Candidate",
backref=backref(
camel_to_under(cls.__name__) + "s",
cascade="all, delete-orphan",
cascade_backrefs=False,
),
cascade_backrefs=False,
)
def __repr__(self) -> str:
"""Represent the annotation as a string."""
return (
f"{self.__class__.__name__}"
f" ("
f"{self.keys}"
f" = "
f"{self.values}"
f")"
)
| fonduer-master | src/fonduer/utils/models/annotation.py |
"""Fonduer's utils model module."""
from fonduer.utils.models.annotation import AnnotationKeyMixin, AnnotationMixin
__all__ = ["AnnotationKeyMixin", "AnnotationMixin"]
| fonduer-master | src/fonduer/utils/models/__init__.py |
"""Fonduer structural modality utilities."""
import functools
from builtins import str
from typing import List, Optional, Tuple, Union
import numpy as np
from lxml import etree
from lxml.etree import _ElementTree
from lxml.html import HtmlElement, fromstring
from fonduer.candidates.models import Candidate, Mention
from fonduer.candidates.models.span_mention import SpanMention, TemporarySpanMention
from fonduer.parser.models.sentence import Sentence
from fonduer.utils.data_model_utils.utils import _to_span
def get_tag(mention: Union[Candidate, Mention, TemporarySpanMention]) -> str:
"""Return the HTML tag of the Mention.
If a candidate is passed in, only the tag of its first Mention is returned.
These may be tags such as 'p', 'h2', 'table', 'div', etc.
:param mention: The Mention to evaluate
"""
span = _to_span(mention)
return str(span.sentence.html_tag)
def get_attributes(
mention: Union[Candidate, Mention, TemporarySpanMention]
) -> List[str]:
"""Return the HTML attributes of the Mention.
If a candidate is passed in, only the tag of its first Mention is returned.
    A sample output of this function for a Mention in a paragraph tag is
[u'style=padding-top: 8pt;padding-left: 20pt;text-indent: 0pt;text-align: left;']
:param mention: The Mention to evaluate
:return: list of strings representing HTML attributes
"""
span = _to_span(mention)
return span.sentence.html_attrs
@functools.lru_cache(maxsize=16)
def _get_etree_for_text(text: str) -> _ElementTree:
return etree.ElementTree(fromstring(text))
def _get_node(sentence: Sentence) -> HtmlElement:
# Using caching to speed up retrieve process
doc_etree = _get_etree_for_text(sentence.document.text)
return doc_etree.xpath(sentence.xpath)[0]
def get_parent_tag(
mention: Union[Candidate, Mention, TemporarySpanMention]
) -> Optional[str]:
"""Return the HTML tag of the Mention's parent.
These may be tags such as 'p', 'h2', 'table', 'div', etc.
If a candidate is passed in, only the tag of its first Mention is returned.
:param mention: The Mention to evaluate
"""
span = _to_span(mention)
i = _get_node(span.sentence)
return str(i.getparent().tag) if i.getparent() is not None else None
def get_prev_sibling_tags(
mention: Union[Candidate, Mention, TemporarySpanMention]
) -> List[str]:
"""Return the HTML tag of the Mention's previous siblings.
Previous siblings are Mentions which are at the same level in the HTML tree
as the given mention, but are declared before the given mention. If a
candidate is passed in, only the previous siblings of its first Mention are
considered in the calculation.
:param mention: The Mention to evaluate
"""
span = _to_span(mention)
prev_sibling_tags: List[str] = []
i = _get_node(span.sentence)
while i.getprevious() is not None:
prev_sibling_tags.insert(0, str(i.getprevious().tag))
i = i.getprevious()
return prev_sibling_tags
def get_next_sibling_tags(
mention: Union[Candidate, Mention, TemporarySpanMention]
) -> List[str]:
"""Return the HTML tag of the Mention's next siblings.
Next siblings are Mentions which are at the same level in the HTML tree as
the given mention, but are declared after the given mention.
If a candidate is passed in, only the next siblings of its last Mention
are considered in the calculation.
:param mention: The Mention to evaluate
"""
span = _to_span(mention)
next_sibling_tags = []
i = _get_node(span.sentence)
while i.getnext() is not None:
next_sibling_tags.append(str(i.getnext().tag))
i = i.getnext()
return next_sibling_tags
def get_ancestor_class_names(
mention: Union[Candidate, Mention, TemporarySpanMention]
) -> List[str]:
"""Return the HTML classes of the Mention's ancestors.
If a candidate is passed in, only the ancestors of its first Mention are
returned.
:param mention: The Mention to evaluate
"""
span = _to_span(mention)
class_names: List[str] = []
i = _get_node(span.sentence)
while i is not None:
class_names.insert(0, str(i.get("class")))
i = i.getparent()
return class_names
def get_ancestor_tag_names(
mention: Union[Candidate, Mention, TemporarySpanMention]
) -> List[str]:
"""Return the HTML tag of the Mention's ancestors.
For example, ['html', 'body', 'p'].
If a candidate is passed in, only the ancestors of its first Mention are returned.
:param mention: The Mention to evaluate
"""
span = _to_span(mention)
tag_names: List[str] = []
i = _get_node(span.sentence)
while i is not None:
tag_names.insert(0, str(i.tag))
i = i.getparent()
return tag_names
def get_ancestor_id_names(
mention: Union[Candidate, Mention, TemporarySpanMention]
) -> List[str]:
"""Return the HTML id's of the Mention's ancestors.
If a candidate is passed in, only the ancestors of its first Mention are
returned.
:param mention: The Mention to evaluate
"""
span = _to_span(mention)
id_names: List[str] = []
i = _get_node(span.sentence)
while i is not None:
id_names.insert(0, str(i.get("id")))
i = i.getparent()
return id_names
def common_ancestor(c: Tuple[SpanMention, ...]) -> List[str]:
"""Return the path to the root that is shared between a multinary-Mention Candidate.
In particular, this is the common path of HTML tags.
:param c: The multinary-Mention Candidate to evaluate
"""
spans = [_to_span(i) for i in c]
ancestors = [np.array(span.sentence.xpath.split("/")) for span in spans]
min_len = min([a.size for a in ancestors])
ancestor = ancestors[0]
ind = 0 # all the ancestors are common up to this index (exclusive).
while ind < min_len:
if not all([a[ind] == ancestor[ind] for a in ancestors]):
break
ind += 1
return list(ancestors[0][:ind])
def lowest_common_ancestor_depth(c: Tuple[SpanMention, ...]) -> int:
"""Return the lowest common ancestor depth.
In particular, return the minimum distance between a multinary-Mention Candidate to
their lowest common ancestor.
For example, if the tree looked like this::
html
├──<div> Mention 1 </div>
├──table
│ ├──tr
│ │ └──<th> Mention 2 </th>
we return 1, the distance from Mention 1 to the html root. Smaller values
indicate that two Mentions are close structurally, while larger values
indicate that two Mentions are spread far apart structurally in the
document.
:param c: The multinary-Mention Candidate to evaluate
"""
spans = [_to_span(i) for i in c]
ancestors = [np.array(span.sentence.xpath.split("/")) for span in spans]
min_len = min([a.size for a in ancestors])
ancestor = ancestors[0]
ind = 0 # all the ancestors are common up to this index (exclusive).
while ind < min_len:
if not all([a[ind] == ancestor[ind] for a in ancestors]):
break
ind += 1
return min_len - ind
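# Editorial sketch (not part of the original module): ``common_ancestor`` and
# ``lowest_common_ancestor_depth`` compare the mentions' xpaths component by
# component. Replaying that prefix logic on two made-up xpath strings shows what
# is computed without needing real SpanMentions.
if __name__ == "__main__":
    xpaths = [
        "/html/body/table[1]/tr[2]/td[1]/p",
        "/html/body/table[1]/tr[3]/td[2]/p",
    ]
    parts = [np.array(x.split("/")) for x in xpaths]
    min_len = min(p.size for p in parts)
    ind = 0
    while ind < min_len and all(p[ind] == parts[0][ind] for p in parts):
        ind += 1
    print(parts[0][:ind].tolist())  # ['', 'html', 'body', 'table[1]'] -- shared path
    print(min_len - ind)  # 3 -- the lowest-common-ancestor depth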
| fonduer-master | src/fonduer/utils/data_model_utils/structural.py |
"""Fonduer textual modality utilities."""
from itertools import chain
from typing import Iterator, Union
from fonduer.candidates.models import Candidate, Mention
from fonduer.candidates.models.span_mention import TemporarySpanMention
from fonduer.utils.data_model_utils.utils import _to_span, _to_spans
from fonduer.utils.utils import tokens_to_ngrams
def same_sentence(c: Candidate) -> bool:
"""Return True if all Mentions in the given candidate are from the same Sentence.
:param c: The candidate whose Mentions are being compared
"""
return all(
_to_span(c[i]).sentence is not None
and _to_span(c[i]).sentence == _to_span(c[0]).sentence
for i in range(len(c))
)
def get_between_ngrams(
c: Candidate,
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
lower: bool = True,
) -> Iterator[str]:
"""Return the ngrams *between* two unary Mentions of a binary-Mention Candidate.
Get the ngrams *between* two unary Mentions of a binary-Mention Candidate,
where both share the same sentence Context.
:param c: The binary-Mention Candidate to evaluate.
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If 'True', all ngrams will be returned in lower case
"""
if len(c) != 2:
raise ValueError("Only applicable to binary Candidates")
span0 = _to_span(c[0])
span1 = _to_span(c[1])
if span0.sentence != span1.sentence:
raise ValueError(
"Only applicable to Candidates where both spans are \
from the same immediate Context."
)
distance = abs(span0.get_word_start_index() - span1.get_word_start_index())
if span0.get_word_start_index() < span1.get_word_start_index():
for ngram in get_right_ngrams(
span0,
window=distance - 1,
attrib=attrib,
n_min=n_min,
n_max=n_max,
lower=lower,
):
yield ngram
else: # span0.get_word_start_index() > span1.get_word_start_index()
for ngram in get_right_ngrams(
span1,
window=distance - 1,
attrib=attrib,
n_min=n_min,
n_max=n_max,
lower=lower,
):
yield ngram
def get_left_ngrams(
mention: Union[Candidate, Mention, TemporarySpanMention],
window: int = 3,
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
lower: bool = True,
) -> Iterator[str]:
"""Get the ngrams within a window to the *left* from the sentence Context.
For higher-arity Candidates, defaults to the *first* argument.
:param mention: The Mention to evaluate. If a candidate is given, default
to its first Mention.
:param window: The number of tokens to the left of the first argument to
return.
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
"""
span = _to_span(mention)
i = span.get_word_start_index()
for ngram in tokens_to_ngrams(
getattr(span.sentence, attrib)[max(0, i - window) : i],
n_min=n_min,
n_max=n_max,
lower=lower,
):
yield ngram
def get_right_ngrams(
mention: Union[Candidate, Mention, TemporarySpanMention],
window: int = 3,
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
lower: bool = True,
) -> Iterator[str]:
"""Get the ngrams within a window to the *right* from the sentence Context.
For higher-arity Candidates, defaults to the *last* argument.
:param mention: The Mention to evaluate. If a candidate is given, default
to its last Mention.
:param window: The number of tokens to the left of the first argument to
return
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
"""
span = _to_span(mention, idx=-1)
i = span.get_word_end_index()
for ngram in tokens_to_ngrams(
getattr(span.sentence, attrib)[i + 1 : i + 1 + window],
n_min=n_min,
n_max=n_max,
lower=lower,
):
yield ngram
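# Illustrative usage sketch (added for exposition; not part of the original
# Fonduer module). Pulls a small window of textual context around a mention;
# ``mention`` is an assumed Mention object and the window size of 2 is an
# arbitrary example value.
def _example_window_context(mention: Mention) -> list:
    """Return up to two lowercased tokens on each side of ``mention``."""
    left = list(get_left_ngrams(mention, window=2, lower=True))
    right = list(get_right_ngrams(mention, window=2, lower=True))
    return left + right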
def get_sentence_ngrams(
mention: Union[Candidate, Mention, TemporarySpanMention],
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
lower: bool = True,
) -> Iterator[str]:
"""Get the ngrams that are in the Sentence of the given Mention, not including itself.
Note that if a candidate is passed in, all of its Mentions will be
searched.
:param mention: The Mention whose Sentence is being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
"""
spans = _to_spans(mention)
for span in spans:
for ngram in get_left_ngrams(
span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
):
yield ngram
for ngram in get_right_ngrams(
span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
):
yield ngram
def get_neighbor_sentence_ngrams(
mention: Union[Candidate, Mention, TemporarySpanMention],
d: int = 1,
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
lower: bool = True,
) -> Iterator[str]:
"""Get the ngrams that are in the neighoring Sentences of the given Mention.
Note that if a candidate is passed in, all of its Mentions will be searched.
:param mention: The Mention whose neighbor Sentences are being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
"""
spans = _to_spans(mention)
for span in spans:
for ngram in chain.from_iterable(
[
tokens_to_ngrams(
getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower
)
for sentence in span.sentence.document.sentences
if abs(sentence.position - span.sentence.position) <= d
and sentence != span.sentence
]
):
yield ngram
| fonduer-master | src/fonduer/utils/data_model_utils/textual.py |
"""Fonduer tabular modality utilities."""
from builtins import range
from collections import defaultdict
from functools import lru_cache
from itertools import chain
from typing import DefaultDict, Iterator, List, Optional, Set, Tuple, Union
import deprecation
from fonduer import __version__
from fonduer.candidates.models import Candidate, Mention
from fonduer.candidates.models.span_mention import TemporarySpanMention
from fonduer.parser.models.sentence import Sentence
from fonduer.parser.models.table import Cell, Table
from fonduer.utils.data_model_utils.textual import (
get_neighbor_sentence_ngrams as get_neighbor_sentence_ngrams_in_textual,
get_sentence_ngrams as get_sentence_ngrams_in_textual,
same_sentence as same_sentence_in_textual,
)
from fonduer.utils.data_model_utils.utils import _to_span, _to_spans
from fonduer.utils.utils import tokens_to_ngrams
from fonduer.utils.utils_table import (
is_axis_aligned,
is_col_aligned,
is_row_aligned,
min_col_diff,
min_row_diff,
)
def same_table(c: Candidate) -> bool:
"""Return True if all Mentions in the given candidate are from the same Table.
:param c: The candidate whose Mentions are being compared
"""
return all(
_to_span(c[i]).sentence.is_tabular()
and _to_span(c[i]).sentence.table == _to_span(c[0]).sentence.table
for i in range(len(c))
)
def same_row(c: Candidate) -> bool:
"""Return True if all Mentions in the given candidate are from the same Row.
:param c: The candidate whose Mentions are being compared
"""
return same_table(c) and all(
is_row_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence)
for i in range(len(c))
)
def same_col(c: Candidate) -> bool:
"""Return True if all Mentions in the given candidate are from the same Col.
:param c: The candidate whose Mentions are being compared
"""
return same_table(c) and all(
is_col_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence)
for i in range(len(c))
)
def is_tabular_aligned(c: Candidate) -> bool:
"""Return True if all Mentions in the given candidate are from the same Row or Col.
:param c: The candidate whose Mentions are being compared
"""
return same_table(c) and all(
is_col_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence)
or is_row_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence)
for i in range(len(c))
)
def same_cell(c: Candidate) -> bool:
"""Return True if all Mentions in the given candidate are from the same Cell.
:param c: The candidate whose Mentions are being compared
"""
return all(
_to_span(c[i]).sentence.cell is not None
and _to_span(c[i]).sentence.cell == _to_span(c[0]).sentence.cell
for i in range(len(c))
)
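# Illustrative usage sketch (added for exposition; not part of the original
# Fonduer module). A throttler-style predicate composed from the checks above;
# ``cand`` is an assumed binary Candidate whose mentions live in an HTML table.
def _example_same_row_different_cell(cand: Candidate) -> bool:
    """Return True if both mentions share a row but sit in different cells."""
    return same_row(cand) and not same_cell(cand)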
@deprecation.deprecated(
deprecated_in="0.8.3",
removed_in="0.9.0",
current_version=__version__,
details="Use :func:`textual.same_sentence()` instead",
)
def same_sentence(c: Candidate) -> bool:
"""Return True if all Mentions in the given candidate are from the same Sentence.
:param c: The candidate whose Mentions are being compared
"""
return same_sentence_in_textual(c)
def get_max_col_num(
mention: Union[Candidate, Mention, TemporarySpanMention]
) -> Optional[int]:
"""Return the largest column number that a Mention occupies.
:param mention: The Mention to evaluate. If a candidate is given, default
to its last Mention.
"""
span = _to_span(mention, idx=-1)
if span.sentence.is_tabular():
return span.sentence.cell.col_end
else:
return None
def get_min_col_num(
mention: Union[Candidate, Mention, TemporarySpanMention]
) -> Optional[int]:
"""Return the lowest column number that a Mention occupies.
:param mention: The Mention to evaluate. If a candidate is given, default
to its first Mention.
"""
span = _to_span(mention)
if span.sentence.is_tabular():
return span.sentence.cell.col_start
else:
return None
def get_max_row_num(
mention: Union[Candidate, Mention, TemporarySpanMention]
) -> Optional[int]:
"""Return the largest row number that a Mention occupies.
:param mention: The Mention to evaluate. If a candidate is given, default
to its last Mention.
"""
span = _to_span(mention, idx=-1)
if span.sentence.is_tabular():
return span.sentence.cell.row_end
else:
return None
def get_min_row_num(
mention: Union[Candidate, Mention, TemporarySpanMention]
) -> Optional[int]:
"""Return the lowest row number that a Mention occupies.
:param mention: The Mention to evaluate. If a candidate is given, default
to its first Mention.
"""
span = _to_span(mention)
if span.sentence.is_tabular():
return span.sentence.cell.row_start
else:
return None
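# Illustrative usage sketch (added for exposition; not part of the original
# Fonduer module). Derives a (row, col) anchor for a tabular mention;
# ``mention`` is an assumed Mention and the (None, None) fallback simply
# reflects the helpers above returning None for non-tabular mentions.
def _example_cell_anchor(mention: Mention) -> Tuple[Optional[int], Optional[int]]:
    """Return the (min row, min col) of ``mention``, or (None, None)."""
    return get_min_row_num(mention), get_min_col_num(mention)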
@deprecation.deprecated(
deprecated_in="0.8.3",
removed_in="0.9.0",
current_version=__version__,
details="Use :func:`textual.get_sentence_ngrams()` instead",
)
def get_sentence_ngrams(
mention: Union[Candidate, Mention, TemporarySpanMention],
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
lower: bool = True,
) -> Iterator[str]:
"""Get the ngrams that are in the Sentence of the given Mention, not including itself.
Note that if a candidate is passed in, all of its Mentions will be
searched.
:param mention: The Mention whose Sentence is being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
"""
return get_sentence_ngrams_in_textual(mention, attrib, n_min, n_max, lower)
@deprecation.deprecated(
deprecated_in="0.8.3",
removed_in="0.9.0",
current_version=__version__,
details="Use :func:`textual.get_neighbor_sentence_ngrams()` instead",
)
def get_neighbor_sentence_ngrams(
mention: Union[Candidate, Mention, TemporarySpanMention],
d: int = 1,
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
lower: bool = True,
) -> Iterator[str]:
"""Get the ngrams that are in the neighoring Sentences of the given Mention.
Note that if a candidate is passed in, all of its Mentions will be searched.
:param mention: The Mention whose neighbor Sentences are being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
"""
return get_neighbor_sentence_ngrams_in_textual(
mention, d, attrib, n_min, n_max, lower
)
def get_cell_ngrams(
mention: Union[Candidate, Mention, TemporarySpanMention],
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
lower: bool = True,
) -> Iterator[str]:
"""Get the ngrams that are in the Cell of the given mention, not including itself.
Note that if a candidate is passed in, all of its Mentions will be searched.
Also note that if the mention is not tabular, nothing will be yielded.
:param mention: The Mention whose Cell is being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
"""
spans = _to_spans(mention)
for span in spans:
if not span.sentence.is_tabular():
continue
for ngram in get_sentence_ngrams(
span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
):
yield ngram
for ngram in chain.from_iterable(
[
tokens_to_ngrams(
getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower
)
for sentence in _get_table_cells(span.sentence.table)[
span.sentence.cell
]
if sentence != span.sentence
]
):
yield ngram
def get_neighbor_cell_ngrams(
mention: Union[Candidate, Mention, TemporarySpanMention],
dist: int = 1,
directions: bool = False,
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
lower: bool = True,
) -> Iterator[Union[str, Tuple[str, str]]]:
"""Get ngrams from all neighbor Cells.
Get the ngrams from all Cells that are within a given Cell distance in one
direction from the given Mention.
Note that if a candidate is passed in, all of its Mentions will be
    searched. If ``directions=True``, each ngram will be returned with a
direction in {'UP', 'DOWN', 'LEFT', 'RIGHT'}.
Also note that if the mention is not tabular, nothing will be yielded.
:param mention: The Mention whose neighbor Cells are being searched
:param dist: The Cell distance within which a neighbor Cell must be to be
considered
:param directions: A Boolean expressing whether or not to return the
direction of each ngram
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:return: a *generator* of ngrams (or (ngram, direction) tuples if directions=True)
"""
# TODO: Fix this to be more efficient (optimize with SQL query)
spans = _to_spans(mention)
for span in spans:
if not span.sentence.is_tabular():
continue
for ngram in get_sentence_ngrams(
span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
):
yield ngram
root_cell = span.sentence.cell
for sentence in chain.from_iterable(
[
_get_aligned_sentences(root_cell, "row"),
_get_aligned_sentences(root_cell, "col"),
]
):
row_diff = min_row_diff([sentence, root_cell], absolute=False)
col_diff = min_col_diff([sentence, root_cell], absolute=False)
if (
row_diff ^ col_diff # Exclusive OR
and abs(row_diff) + abs(col_diff) <= dist
):
if directions:
if col_diff == 0:
direction = "DOWN" if 0 < row_diff else "UP"
else:
direction = "RIGHT" if 0 < col_diff else "LEFT"
for ngram in tokens_to_ngrams(
getattr(sentence, attrib),
n_min=n_min,
n_max=n_max,
lower=lower,
):
yield (ngram, direction)
else:
for ngram in tokens_to_ngrams(
getattr(sentence, attrib),
n_min=n_min,
n_max=n_max,
lower=lower,
):
yield ngram
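# Illustrative usage sketch (added for exposition; not part of the original
# Fonduer module). Collects directed neighbor-cell unigrams for a tabular
# mention; ``mention`` is an assumed Mention and the distance of 1 is the
# default. The isinstance filter drops the undirected same-sentence ngrams
# that the generator above also yields.
def _example_directed_neighbors(mention: Mention) -> List[Tuple[str, str]]:
    """Return (ngram, direction) pairs from cells adjacent to ``mention``."""
    return [
        pair
        for pair in get_neighbor_cell_ngrams(mention, dist=1, directions=True)
        if isinstance(pair, tuple)
    ]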
def get_row_ngrams(
mention: Union[Candidate, Mention, TemporarySpanMention],
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
spread: List[int] = [0, 0],
lower: bool = True,
) -> Iterator[str]:
"""Get the ngrams from all Cells that are in the same row as the given Mention.
Note that if a candidate is passed in, all of its Mentions will be searched.
Also note that if the mention is not tabular, nothing will be yielded.
:param mention: The Mention whose row Cells are being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param spread: The number of rows above and below to also consider "aligned".
:param lower: If True, all ngrams will be returned in lower case
"""
spans = _to_spans(mention)
for span in spans:
for ngram in _get_axis_ngrams(
span,
axis="row",
attrib=attrib,
n_min=n_min,
n_max=n_max,
spread=spread,
lower=lower,
):
yield ngram
def get_col_ngrams(
mention: Union[Candidate, Mention, TemporarySpanMention],
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
spread: List[int] = [0, 0],
lower: bool = True,
) -> Iterator[str]:
"""Get the ngrams from all Cells that are in the same column as the given Mention.
Note that if a candidate is passed in, all of its Mentions will be searched.
Also note that if the mention is not tabular, nothing will be yielded.
:param mention: The Mention whose column Cells are being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param spread: The number of cols left and right to also consider "aligned".
:param lower: If True, all ngrams will be returned in lower case
"""
spans = _to_spans(mention)
for span in spans:
for ngram in _get_axis_ngrams(
span,
axis="col",
attrib=attrib,
n_min=n_min,
n_max=n_max,
spread=spread,
lower=lower,
):
yield ngram
def get_aligned_ngrams(
mention: Union[Candidate, Mention, TemporarySpanMention],
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
spread: List[int] = [0, 0],
lower: bool = True,
) -> Iterator[str]:
"""Get the ngrams from all Cells in the same row or column as the given Mention.
Note that if a candidate is passed in, all of its Mentions will be
searched.
Also note that if the mention is not tabular, nothing will be yielded.
:param mention: The Mention whose row and column Cells are being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param spread: The number of rows/cols above/below/left/right to also
consider "aligned".
:param lower: If True, all ngrams will be returned in lower case
"""
spans = _to_spans(mention)
for span in spans:
for ngram in get_row_ngrams(
span, attrib=attrib, n_min=n_min, n_max=n_max, spread=spread, lower=lower
):
yield ngram
for ngram in get_col_ngrams(
span, attrib=attrib, n_min=n_min, n_max=n_max, spread=spread, lower=lower
):
yield ngram
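# Illustrative usage sketch (added for exposition; not part of the original
# Fonduer module). A row-header check of the kind typically used in labeling
# functions; ``mention`` is an assumed tabular Mention and "voltage" is an
# arbitrary example header word.
def _example_row_mentions_voltage(mention: Mention) -> bool:
    """Return True if any unigram in the mention's row reads "voltage"."""
    return "voltage" in get_row_ngrams(mention, n_min=1, n_max=1, lower=True)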
def get_head_ngrams(
mention: Union[Candidate, Mention, TemporarySpanMention],
axis: Optional[str] = None,
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
lower: bool = True,
) -> Iterator[str]:
"""Get the ngrams from the cell in the head of the row or column.
More specifically, this returns the ngrams in the leftmost cell in a row and/or the
ngrams in the topmost cell in the column, depending on the axis parameter.
Note that if a candidate is passed in, all of its Mentions will be searched.
Also note that if the mention is not tabular, nothing will be yielded.
:param mention: The Mention whose head Cells are being returned
:param axis: Which axis {'row', 'col'} to search. If None, then both row
and col are searched.
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
"""
spans = _to_spans(mention)
axes: Set[str] = (axis,) if axis else ("row", "col") # type: ignore
for span in spans:
if span.sentence.is_tabular():
for axis in axes:
if getattr(span.sentence, _other_axis(axis) + "_start") == 0:
return
for sentence in getattr(
_get_head_cell(span.sentence.cell, axis), "sentences", []
):
for ngram in tokens_to_ngrams(
getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower
):
yield ngram
@lru_cache(maxsize=1024)
def _get_head_cell(root_cell: Cell, axis: str) -> Cell:
other_axis = "row" if axis == "col" else "col"
aligned_cells = _get_aligned_cells(root_cell, axis)
return (
sorted(aligned_cells, key=lambda x: getattr(x, other_axis + "_start"))[0]
if aligned_cells
else None
)
@lru_cache(maxsize=256)
def _get_table_cells(table: Table) -> DefaultDict[Cell, List[Sentence]]:
"""Cache table cells and the cells' sentences.
This function significantly improves the speed of `get_row_ngrams`
    primarily by reducing the number of queries that are made (which were
    previously the bottleneck). Rather than taking a single mention, then its
    sentence, then its table, then all the cells in the table, then all the
    sentences in each cell, and performing operations on that series of
    queries, this performs a single query for all the sentences in a table and
    returns all of the cells and the cells' sentences directly.
    :param table: the Table object to cache.
    :return: a defaultdict mapping each Cell to the list of Sentences it contains.
"""
sent_map: DefaultDict[Cell, List[Sentence]] = defaultdict(list)
for sent in table.sentences:
sent_map[sent.cell].append(sent)
return sent_map
def _get_axis_ngrams(
mention: Union[Candidate, Mention, TemporarySpanMention],
axis: str,
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
spread: List[int] = [0, 0],
lower: bool = True,
) -> Iterator[str]:
span = _to_span(mention)
if not span.sentence.is_tabular():
return
yield
for ngram in get_sentence_ngrams(
span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
):
yield ngram
for sentence in _get_aligned_sentences(span.sentence, axis, spread=spread):
for ngram in tokens_to_ngrams(
getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower
):
yield ngram
@lru_cache(maxsize=1024)
def _get_aligned_cells(root_cell: Cell, axis: str) -> List[Cell]:
aligned_cells = [
cell
for cell in root_cell.table.cells
if is_axis_aligned(root_cell, cell, axis=axis) and cell != root_cell
]
return aligned_cells
def _get_aligned_sentences(
root_sentence: Sentence, axis: str, spread: List[int] = [0, 0]
) -> List[Sentence]:
cells = _get_table_cells(root_sentence.table).items()
aligned_sentences = [
sentence
for (cell, sentences) in cells
if is_axis_aligned(root_sentence, cell, axis=axis, spread=spread)
for sentence in sentences
if sentence != root_sentence
]
return aligned_sentences
def _other_axis(axis: str) -> str:
return "row" if axis == "col" else "col"
| fonduer-master | src/fonduer/utils/data_model_utils/tabular.py |
"""Fonduer's data model utils module."""
from fonduer.utils.data_model_utils.structural import (
common_ancestor,
get_ancestor_class_names,
get_ancestor_id_names,
get_ancestor_tag_names,
get_attributes,
get_next_sibling_tags,
get_parent_tag,
get_prev_sibling_tags,
get_tag,
lowest_common_ancestor_depth,
)
from fonduer.utils.data_model_utils.tabular import (
get_aligned_ngrams,
get_cell_ngrams,
get_col_ngrams,
get_head_ngrams,
get_max_col_num,
get_min_col_num,
get_neighbor_cell_ngrams,
get_neighbor_sentence_ngrams,
get_row_ngrams,
get_sentence_ngrams,
is_tabular_aligned,
same_cell,
same_col,
same_row,
same_sentence,
same_table,
)
from fonduer.utils.data_model_utils.textual import (
get_between_ngrams,
get_left_ngrams,
get_right_ngrams,
)
from fonduer.utils.data_model_utils.utils import get_matches, is_superset, overlap
from fonduer.utils.data_model_utils.visual import (
get_aligned_lemmas,
get_horz_ngrams,
get_page,
get_page_horz_percentile,
get_page_vert_percentile,
get_vert_ngrams,
get_visual_aligned_lemmas,
is_horz_aligned,
is_vert_aligned,
is_vert_aligned_center,
is_vert_aligned_left,
is_vert_aligned_right,
same_page,
)
__all__ = [
"common_ancestor",
"get_aligned_lemmas",
"get_aligned_ngrams",
"get_ancestor_class_names",
"get_ancestor_id_names",
"get_ancestor_tag_names",
"get_attributes",
"get_between_ngrams",
"get_cell_ngrams",
"get_col_ngrams",
"get_head_ngrams",
"get_horz_ngrams",
"get_left_ngrams",
"get_matches",
"get_max_col_num",
"get_min_col_num",
"get_neighbor_cell_ngrams",
"get_neighbor_sentence_ngrams",
"get_next_sibling_tags",
"get_page",
"get_page_horz_percentile",
"get_page_vert_percentile",
"get_parent_tag",
"get_prev_sibling_tags",
"get_right_ngrams",
"get_row_ngrams",
"get_sentence_ngrams",
"get_tag",
"get_vert_ngrams",
"get_visual_aligned_lemmas",
"is_horz_aligned",
"is_superset",
"is_tabular_aligned",
"is_vert_aligned",
"is_vert_aligned_center",
"is_vert_aligned_left",
"is_vert_aligned_right",
"lowest_common_ancestor_depth",
"overlap",
"same_cell",
"same_col",
"same_page",
"same_row",
"same_sentence",
"same_table",
]
| fonduer-master | src/fonduer/utils/data_model_utils/__init__.py |
"""Fonduer data model utils."""
import logging
from functools import lru_cache
from typing import Callable, Iterable, List, Set, Union
from fonduer.candidates.models import Candidate, Mention
from fonduer.candidates.models.span_mention import TemporarySpanMention
@lru_cache(maxsize=1024)
def _to_span(
x: Union[Candidate, Mention, TemporarySpanMention], idx: int = 0
) -> TemporarySpanMention:
"""Convert a Candidate, Mention, or Span to a span."""
if isinstance(x, Candidate):
return x[idx].context
elif isinstance(x, Mention):
return x.context
elif isinstance(x, TemporarySpanMention):
return x
else:
raise ValueError(f"{type(x)} is an invalid argument type")
@lru_cache(maxsize=1024)
def _to_spans(
x: Union[Candidate, Mention, TemporarySpanMention]
) -> List[TemporarySpanMention]:
"""Convert a Candidate, Mention, or Span to a list of spans."""
if isinstance(x, Candidate):
return [_to_span(m) for m in x]
elif isinstance(x, Mention):
return [x.context]
elif isinstance(x, TemporarySpanMention):
return [x]
else:
raise ValueError(f"{type(x)} is an invalid argument type")
def is_superset(a: Iterable, b: Iterable) -> bool:
"""Check if a is a superset of b.
This is typically used to check if ALL of a list of sentences is in the
ngrams returned by an lf_helper.
:param a: A collection of items
:param b: A collection of items
"""
return set(a).issuperset(b)
def overlap(a: Iterable, b: Iterable) -> bool:
"""Check if a overlaps b.
This is typically used to check if ANY of a list of sentences is in the
ngrams returned by an lf_helper.
:param a: A collection of items
:param b: A collection of items
"""
return not set(a).isdisjoint(b)
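# Illustrative usage sketch (added for exposition; not part of the original
# Fonduer module). Shows the intended direction of the two set checks above on
# plain token lists; the tokens themselves are arbitrary examples.
def _example_keyword_checks() -> bool:
    """Demonstrate overlap() and is_superset() on small token lists."""
    ngrams = ["storage", "temperature", "range"]
    keywords = ["temperature", "current"]
    # True: at least one keyword appears among the ngrams.
    has_any = overlap(keywords, ngrams)
    # False: not every keyword appears among the ngrams.
    has_all = is_superset(ngrams, keywords)
    return has_any and not has_all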
def get_matches(
lf: Callable, candidate_set: Set[Candidate], match_values: List[int] = [1, -1]
) -> List[Candidate]:
"""Return a list of candidates that are matched by a particular LF.
A simple helper function to see how many matches (non-zero by default) an
LF gets.
:param lf: The labeling function to apply to the candidate_set
:param candidate_set: The set of candidates to evaluate
    :param match_values: An optional list of the values to consider as matched.
[1, -1] by default.
"""
logger = logging.getLogger(__name__)
matches: List[Candidate] = []
for c in candidate_set:
label = lf(c)
if label in match_values:
matches.append(c)
logger.info(f"{len(matches)} matches")
return matches
| fonduer-master | src/fonduer/utils/data_model_utils/utils.py |
"""Fonduer visual modality utilities."""
from builtins import range
from collections import defaultdict
from functools import lru_cache
from typing import Any, DefaultDict, Iterator, List, Set, Union
from fonduer.candidates.mentions import Ngrams
from fonduer.candidates.models import Candidate, Mention
from fonduer.candidates.models.span_mention import TemporarySpanMention
from fonduer.parser.models import Document, Sentence
from fonduer.utils.data_model_utils.utils import _to_span, _to_spans
from fonduer.utils.utils import tokens_to_ngrams
from fonduer.utils.utils_visual import (
bbox_horz_aligned,
bbox_vert_aligned,
bbox_vert_aligned_center,
bbox_vert_aligned_left,
bbox_vert_aligned_right,
)
@lru_cache(maxsize=1024)
def get_page(mention: Union[Candidate, Mention, TemporarySpanMention]) -> int:
"""Return the page number of the given mention.
If a candidate is passed in, this returns the page of its first Mention.
:param mention: The Mention to get the page number of.
"""
span = _to_span(mention)
return span.get_attrib_tokens("page")[0]
@lru_cache(maxsize=1024)
def is_horz_aligned(c: Candidate) -> bool:
"""Return True if all the components of c are horizontally aligned.
Horizontal alignment means that the bounding boxes of each Mention of c
shares a similar y-axis value in the visual rendering of the document.
:param c: The candidate to evaluate
"""
return all(
[
_to_span(c[i]).sentence.is_visual()
and bbox_horz_aligned(_to_span(c[i]).get_bbox(), _to_span(c[0]).get_bbox())
for i in range(len(c))
]
)
@lru_cache(maxsize=1024)
def is_vert_aligned(c: Candidate) -> bool:
"""Return true if all the components of c are vertically aligned.
Vertical alignment means that the bounding boxes of each Mention of c
shares a similar x-axis value in the visual rendering of the document.
:param c: The candidate to evaluate
"""
return all(
[
_to_span(c[i]).sentence.is_visual()
and bbox_vert_aligned(_to_span(c[i]).get_bbox(), _to_span(c[0]).get_bbox())
for i in range(len(c))
]
)
@lru_cache(maxsize=1024)
def is_vert_aligned_left(c: Candidate) -> bool:
"""Return true if all components are vertically aligned on their left border.
Vertical alignment means that the bounding boxes of each Mention of c
shares a similar x-axis value in the visual rendering of the document. In
this function the similarity of the x-axis value is based on the left
border of their bounding boxes.
:param c: The candidate to evaluate
"""
return all(
[
_to_span(c[i]).sentence.is_visual()
and bbox_vert_aligned_left(
_to_span(c[i]).get_bbox(), _to_span(c[0]).get_bbox()
)
for i in range(len(c))
]
)
@lru_cache(maxsize=1024)
def is_vert_aligned_right(c: Candidate) -> bool:
"""Return true if all components vertically aligned on their right border.
Vertical alignment means that the bounding boxes of each Mention of c
shares a similar x-axis value in the visual rendering of the document. In
this function the similarity of the x-axis value is based on the right
border of their bounding boxes.
:param c: The candidate to evaluate
"""
return all(
[
_to_span(c[i]).sentence.is_visual()
and bbox_vert_aligned_right(
_to_span(c[i]).get_bbox(), _to_span(c[0]).get_bbox()
)
for i in range(len(c))
]
)
@lru_cache(maxsize=1024)
def is_vert_aligned_center(c: Candidate) -> bool:
"""Return true if all the components are vertically aligned on their center.
Vertical alignment means that the bounding boxes of each Mention of c
shares a similar x-axis value in the visual rendering of the document. In
this function the similarity of the x-axis value is based on the center of
their bounding boxes.
:param c: The candidate to evaluate
"""
return all(
[
_to_span(c[i]).sentence.is_visual()
and bbox_vert_aligned_center(
_to_span(c[i]).get_bbox(), _to_span(c[0]).get_bbox()
)
for i in range(len(c))
]
)
@lru_cache(maxsize=1024)
def same_page(c: Candidate) -> bool:
"""Return true if all the components of c are on the same page of the document.
Page numbers are based on the PDF rendering of the document. If a PDF file is
provided, it is used. Otherwise, if only a HTML/XML document is provided, a
PDF is created and then used to determine the page number of a Mention.
:param c: The candidate to evaluate
"""
return all(
[
_to_span(c[i]).sentence.is_visual()
and _to_span(c[i]).get_bbox().page == _to_span(c[0]).get_bbox().page
for i in range(len(c))
]
)
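# Illustrative usage sketch (added for exposition; not part of the original
# Fonduer module). A visual throttler of the kind used to prune candidates;
# ``cand`` is an assumed binary Candidate over a visually parsed document.
def _example_visually_coherent(cand: Candidate) -> bool:
    """Return True if both mentions share a page and are aligned on some axis."""
    return same_page(cand) and (is_horz_aligned(cand) or is_vert_aligned(cand))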
def get_horz_ngrams(
mention: Union[Candidate, Mention, TemporarySpanMention],
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
lower: bool = True,
from_sentence: bool = True,
) -> Iterator[str]:
"""Return all ngrams which are visually horizontally aligned with the Mention.
Note that if a candidate is passed in, all of its Mentions will be searched.
:param mention: The Mention to evaluate
:param attrib: The token attribute type (e.g. words, lemmas, pos_tags).
This option is valid only when ``from_sentence==True``.
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:param from_sentence: If True, return ngrams of any ``Sentence`` that is
horizontally aligned (in the same page) with the mention's ``Sentence``.
If False, return ngrams that are horizontally aligned with the mention
no matter which ``Sentence`` they are from.
:return: a *generator* of ngrams
"""
spans = _to_spans(mention)
for span in spans:
for ngram in _get_direction_ngrams(
"horz", span, attrib, n_min, n_max, lower, from_sentence
):
yield ngram
def get_vert_ngrams(
mention: Union[Candidate, Mention, TemporarySpanMention],
attrib: str = "words",
n_min: int = 1,
n_max: int = 1,
lower: bool = True,
from_sentence: bool = True,
) -> Iterator[str]:
"""Return all ngrams which are visually vertically aligned with the Mention.
Note that if a candidate is passed in, all of its Mentions will be searched.
:param mention: The Mention to evaluate
:param attrib: The token attribute type (e.g. words, lemmas, pos_tags).
This option is valid only when ``from_sentence==True``.
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:param from_sentence: If True, return ngrams of any ``Sentence`` that is
vertically aligned (in the same page) with the mention's ``Sentence``.
If False, return ngrams that are vertically aligned with the mention
no matter which ``Sentence`` they are from.
:return: a *generator* of ngrams
"""
spans = _to_spans(mention)
for span in spans:
for ngram in _get_direction_ngrams(
"vert", span, attrib, n_min, n_max, lower, from_sentence
):
yield ngram
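# Illustrative usage sketch (added for exposition; not part of the original
# Fonduer module). Checks whether a header word is rendered directly above or
# below a mention; ``mention`` is an assumed Mention and "vin" is an arbitrary
# example token.
def _example_vert_header_match(mention: Mention) -> bool:
    """Return True if "vin" appears in a sentence vertically aligned with ``mention``."""
    return "vin" in get_vert_ngrams(mention, n_min=1, n_max=1, lower=True)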
def _get_direction_ngrams(
direction: str,
c: Union[Candidate, Mention, TemporarySpanMention],
attrib: str,
n_min: int,
n_max: int,
lower: bool,
from_sentence: bool,
) -> Iterator[str]:
bbox_direction_aligned = (
bbox_vert_aligned if direction == "vert" else bbox_horz_aligned
)
ngrams_space = Ngrams(n_max=n_max, split_tokens=[])
f = (lambda w: w.lower()) if lower else (lambda w: w)
spans = _to_spans(c)
for span in spans:
if not span.sentence.is_visual():
continue
for sentence in span.sentence.document.sentences:
# Skip if not in the same page.
if span.sentence.get_bbox().page != sentence.get_bbox().page:
continue
if from_sentence:
if (
bbox_direction_aligned(sentence.get_bbox(), span.get_bbox())
and sentence is not span.sentence # not from its Sentence
):
for ngram in tokens_to_ngrams(
getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower
):
yield ngram
else:
for ts in ngrams_space.apply(sentence):
if ( # True if visually aligned AND not from itself.
bbox_direction_aligned(ts.get_bbox(), span.get_bbox())
and ts not in span
and span not in ts
):
yield f(ts.get_span())
def get_vert_ngrams_left(c): # type: ignore
"""Not implemented."""
# TODO
return
def get_vert_ngrams_right(c): # type: ignore
"""Not implemented."""
# TODO
return
def get_vert_ngrams_center(c): # type: ignore
"""Not implemented."""
# TODO
return
def get_visual_header_ngrams(c, axis=None): # type: ignore
"""Not implemented."""
# TODO
return
def get_visual_distance(c, axis=None): # type: ignore
"""Not implemented."""
# TODO
return
# Default dimensions for 8.5" x 11"
DEFAULT_WIDTH = 612
DEFAULT_HEIGHT = 792
def get_page_vert_percentile(
mention: Union[Candidate, Mention, TemporarySpanMention],
page_width: int = DEFAULT_WIDTH,
page_height: int = DEFAULT_HEIGHT,
) -> float:
"""Return which percentile from the TOP in the page the Mention is located in.
Percentile is calculated where the top of the page is 0.0, and the bottom
    of the page is 1.0. For example, a Mention at the top 1/4 of the page
will have a percentile of 0.25.
Page width and height are based on pt values::
Letter 612x792
Tabloid 792x1224
Ledger 1224x792
Legal 612x1008
Statement 396x612
Executive 540x720
A0 2384x3371
A1 1685x2384
A2 1190x1684
A3 842x1190
A4 595x842
A4Small 595x842
A5 420x595
B4 729x1032
B5 516x729
Folio 612x936
Quarto 610x780
10x14 720x1008
and should match the source documents. Letter size is used by default.
    Note that if a candidate is passed in, only the vertical percentile of its
    first Mention is returned.
    :param mention: The Mention to evaluate
    :param page_width: The width of the page. Defaults to Letter paper width.
    :param page_height: The height of the page. Defaults to Letter paper height.
"""
span = _to_span(mention)
return span.get_bbox().top / page_height
def get_page_horz_percentile(
mention: Union[Candidate, Mention, TemporarySpanMention],
page_width: int = DEFAULT_WIDTH,
page_height: int = DEFAULT_HEIGHT,
) -> float:
"""Return which percentile from the LEFT in the page the Mention is located in.
Percentile is calculated where the left of the page is 0.0, and the right
of the page is 1.0.
Page width and height are based on pt values::
Letter 612x792
Tabloid 792x1224
Ledger 1224x792
Legal 612x1008
Statement 396x612
Executive 540x720
A0 2384x3371
A1 1685x2384
A2 1190x1684
A3 842x1190
A4 595x842
A4Small 595x842
A5 420x595
B4 729x1032
B5 516x729
Folio 612x936
Quarto 610x780
10x14 720x1008
and should match the source documents. Letter size is used by default.
    Note that if a candidate is passed in, only the horizontal percentile of its
    first Mention is returned.
    :param mention: The Mention to evaluate
    :param page_width: The width of the page. Defaults to Letter paper width.
    :param page_height: The height of the page. Defaults to Letter paper height.
"""
span = _to_span(mention)
return span.get_bbox().left / page_width
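# Illustrative usage sketch (added for exposition; not part of the original
# Fonduer module). Flags mentions rendered near the bottom of a Letter-sized
# page; the 0.9 threshold is an arbitrary example value.
def _example_in_page_footer(mention: Mention) -> bool:
    """Return True if ``mention`` sits in the bottom 10% of its page."""
    return get_page_vert_percentile(mention) > 0.9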
def _assign_alignment_features(sentences_by_key: defaultdict, align_type: str) -> None:
for key, sentences in sentences_by_key.items():
if len(sentences) == 1:
continue
context_lemmas: Set[str] = set()
for p in sentences:
p._aligned_lemmas.update(context_lemmas)
# update lemma context for upcoming sentences in the group
if len(p.lemmas) < 7:
new_lemmas = [lemma.lower() for lemma in p.lemmas if lemma.isalpha()]
context_lemmas.update(new_lemmas)
context_lemmas.update(align_type + lemma for lemma in new_lemmas)
@lru_cache(maxsize=2)
def _preprocess_visual_features(doc: Document) -> None:
if hasattr(doc, "_visual_features"):
return
# cache flag
doc._visual_features = True
sentence_by_page: DefaultDict[str, List[Sentence]] = defaultdict(list)
for sentence in doc.sentences:
sentence_by_page[sentence.page[0]].append(sentence)
sentence._aligned_lemmas = set()
for page, sentences in sentence_by_page.items():
# process per page alignments
yc_aligned: DefaultDict[Any, List[Sentence]] = defaultdict(list)
x0_aligned: DefaultDict[Any, List[Sentence]] = defaultdict(list)
xc_aligned: DefaultDict[Any, List[Sentence]] = defaultdict(list)
x1_aligned: DefaultDict[Any, List[Sentence]] = defaultdict(list)
for sentence in sentences:
sentence.bbox = sentence.get_bbox()
sentence.yc = (sentence.bbox.top + sentence.bbox.bottom) / 2
sentence.x0 = sentence.bbox.left
sentence.x1 = sentence.bbox.right
sentence.xc = (sentence.x0 + sentence.x1) / 2
# index current sentence by different alignment keys
yc_aligned[sentence.yc].append(sentence)
x0_aligned[sentence.x0].append(sentence)
x1_aligned[sentence.x1].append(sentence)
xc_aligned[sentence.xc].append(sentence)
for l in yc_aligned.values():
l.sort(key=lambda p: p.xc)
for l in x0_aligned.values():
l.sort(key=lambda p: p.yc)
for l in x1_aligned.values():
l.sort(key=lambda p: p.yc)
for l in xc_aligned.values():
l.sort(key=lambda p: p.yc)
_assign_alignment_features(yc_aligned, "Y_")
_assign_alignment_features(x0_aligned, "LEFT_")
_assign_alignment_features(x1_aligned, "RIGHT_")
_assign_alignment_features(xc_aligned, "CENTER_")
def get_visual_aligned_lemmas(
mention: Union[Candidate, Mention, TemporarySpanMention]
) -> Iterator[str]:
"""Return a generator of the lemmas aligned visually with the Mention.
Note that if a candidate is passed in, all of its Mentions will be searched.
:param mention: The Mention to evaluate.
"""
spans = _to_spans(mention)
for span in spans:
sentence = span.sentence
doc = sentence.document
# cache features for the entire document
_preprocess_visual_features(doc)
for aligned_lemma in sentence._aligned_lemmas:
yield aligned_lemma
def get_aligned_lemmas(
mention: Union[Candidate, Mention, TemporarySpanMention]
) -> Set[str]:
"""Return a set of the lemmas aligned visually with the Mention.
Note that if a candidate is passed in, all of its Mentions will be searched.
:param mention: The Mention to evaluate.
"""
return set(get_visual_aligned_lemmas(mention))
| fonduer-master | src/fonduer/utils/data_model_utils/visual.py |
"""Fonduer's logging module."""
from fonduer.utils.logging.tensorboard_writer import TensorBoardLogger
__all__ = ["TensorBoardLogger"]
| fonduer-master | src/fonduer/utils/logging/__init__.py |
"""Fonduer tensorboard logger."""
from tensorboardX import SummaryWriter
class TensorBoardLogger(object):
"""A class for logging to Tensorboard during training process."""
def __init__(self, log_dir: str):
"""Create a summary writer logging to log_dir."""
self.writer = SummaryWriter(log_dir)
def add_scalar(self, name: str, value: float, step: int) -> None:
"""Log a scalar variable."""
self.writer.add_scalar(name, value, step)
def close(self) -> None:
"""Close the tensorboard logger."""
self.writer.close()
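# Illustrative usage sketch (added for exposition; not part of the original
# Fonduer module). The log directory and metric values below are arbitrary
# examples.
def _example_log_training_loss() -> None:
    """Log a short, made-up loss curve to TensorBoard."""
    tb_logger = TensorBoardLogger("logs/example_run")
    for step, loss in enumerate([0.9, 0.7, 0.5]):
        tb_logger.add_scalar("train/loss", loss, step)
    tb_logger.close()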
| fonduer-master | src/fonduer/utils/logging/tensorboard_writer.py |
"""Fonduer's parser module."""
from fonduer.parser.parser import Parser
__all__ = ["Parser"]
| fonduer-master | src/fonduer/parser/__init__.py |
"""Fonduer parser."""
import itertools
import logging
import re
import warnings
from builtins import range
from collections import defaultdict
from typing import (
Any,
Collection,
Dict,
Iterator,
List,
Optional,
Pattern,
Tuple,
Union,
)
import lxml.etree
import lxml.html
from lxml.html import HtmlElement
from sqlalchemy.orm import Session
from fonduer.parser.lingual_parser import LingualParser, SimpleParser, SpacyParser
from fonduer.parser.models import (
Caption,
Cell,
Context,
Document,
Figure,
Paragraph,
Section,
Sentence,
Table,
)
from fonduer.parser.models.utils import construct_stable_id
from fonduer.parser.visual_parser import VisualParser
from fonduer.utils.udf import UDF, UDFRunner
logger = logging.getLogger(__name__)
class Parser(UDFRunner):
r"""Parses into documents into Fonduer's Data Model.
:param session: The database session to use.
:param parallelism: The number of processes to use in parallel. Default 1.
:param structural: Whether to parse structural information from a DOM.
:param blacklist: A list of tag types to ignore. Default ["style", "script"].
:param flatten: A list of tag types to flatten. Default ["span", "br"]
:param language: Which spaCy NLP language package. Default "en".
:param lingual: Whether or not to include NLP information. Default True.
:param lingual_parser: A custom lingual parser that inherits
:class:`LingualParser <fonduer.parser.lingual_parser.LingualParser>`.
When specified, `language` will be ignored.
When not, :class:`Spacy` with `language` will be used.
:param strip: Whether or not to strip whitespace during parsing. Default True.
:param replacements: A list of tuples where the regex string in the
first position is replaced by the character in the second position.
Default [(u"[\u2010\u2011\u2012\u2013\u2014\u2212]", "-")], which
replaces various unicode variants of a hyphen (e.g. emdash, endash,
minus, etc.) with a standard ASCII hyphen.
:param tabular: Whether to include tabular information in the parse.
:param visual_parser: A visual parser that parses visual information.
Defaults to None (visual information is not parsed).
"""
def __init__(
self,
session: Session,
parallelism: int = 1,
structural: bool = True, # structural information
blacklist: List[str] = [
"style",
"script",
], # ignore tag types, default: style, script
flatten: List[str] = ["span", "br"], # flatten tag types, default: span, br
language: str = "en",
lingual: bool = True, # lingual information
lingual_parser: Optional[LingualParser] = None,
strip: bool = True,
replacements: List[Tuple[str, str]] = [
("[\u2010\u2011\u2012\u2013\u2014\u2212]", "-")
],
tabular: bool = True, # tabular information
visual_parser: Optional[VisualParser] = None, # visual parser
) -> None:
"""Initialize Parser."""
super().__init__(
session,
ParserUDF,
parallelism=parallelism,
structural=structural,
blacklist=blacklist,
flatten=flatten,
lingual=lingual,
lingual_parser=lingual_parser,
strip=strip,
replacements=replacements,
tabular=tabular,
visual_parser=visual_parser,
language=language,
)
def apply( # type: ignore
self,
doc_loader: Collection[Document],
clear: bool = True,
parallelism: Optional[int] = None,
progress_bar: bool = True,
) -> None:
"""Run the Parser.
        :param doc_loader: An iterable of ``Documents`` to parse. Typically,
            one of Fonduer's document preprocessors.
        :param clear: Whether or not to clear the existing ``Context`` objects
            in the database before parsing.
        :param parallelism: How many processes to use for parsing. This will
            override the parallelism value used to initialize the Parser if
            it is provided.
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
"""
super().apply(
doc_loader,
clear=clear,
parallelism=parallelism,
progress_bar=progress_bar,
)
def _add(self, session: Session, doc: Union[Document, None]) -> None:
# Persist the object if no error happens during parsing.
if doc:
session.add(doc)
session.commit()
def clear(self) -> None: # type: ignore
"""Clear all of the ``Context`` objects in the database."""
self.session.query(Context).delete(synchronize_session="fetch")
def get_last_documents(self) -> List[Document]:
"""Return the most recently successfully parsed list of ``Documents``.
:return: A list of the most recently parsed ``Documents`` ordered by name.
"""
return (
self.session.query(Document)
.filter(Document.name.in_(self.last_docs))
.order_by(Document.name)
.all()
)
def get_documents(self) -> List[Document]:
"""Return all the successfully parsed ``Documents`` in the database.
:return: A list of all ``Documents`` in the database ordered by name.
"""
# return (
# self.session.query(Document, Sentence)
# .join(Sentence, Document.id == Sentence.document_id)
# .all()
# )
# return self.session.query(Sentence).order_by(Sentence.name).all()
return self.session.query(Document).order_by(Document.name).all()
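# Illustrative usage sketch (added for exposition; not part of the original
# Fonduer module). Typical end-to-end use of the Parser; ``session`` and
# ``docs`` are assumed to come from Fonduer's session setup and a document
# preprocessor respectively, and the parallelism value is an arbitrary example.
def _example_parse_corpus(
    session: Session, docs: Collection[Document]
) -> List[Document]:
    """Parse ``docs`` into the database and return them ordered by name."""
    parser = Parser(session, structural=True, lingual=True, parallelism=4)
    parser.apply(docs, parallelism=4)
    return parser.get_documents()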
class ParserUDF(UDF):
"""Parser UDF class."""
def __init__(
self,
structural: bool,
blacklist: Union[str, List[str]],
flatten: Union[str, List[str]],
lingual: bool,
lingual_parser: Optional[LingualParser],
strip: bool,
replacements: List[Tuple[str, str]],
tabular: bool,
visual_parser: Optional[VisualParser],
language: Optional[str],
**kwargs: Any,
) -> None:
"""Initialize Parser UDF.
        :param replacements: a list of (_pattern_, _replace_) tuples where
            _pattern_ is a regex and _replace_ is a character string.
            All occurrences of _pattern_ in the text will be replaced by
            _replace_.
"""
super().__init__(**kwargs)
# structural (html) setup
self.structural = structural
self.blacklist = blacklist if isinstance(blacklist, list) else [blacklist]
self.flatten = flatten if isinstance(flatten, list) else [flatten]
# lingual setup
self.language = language
self.strip = strip
self.replacements: List[Tuple[Pattern, str]] = []
for (pattern, replace) in replacements:
self.replacements.append((re.compile(pattern, flags=re.UNICODE), replace))
self.lingual = lingual
if lingual_parser:
self.lingual_parser = lingual_parser
else:
self.lingual_parser = SpacyParser(self.language)
# Fallback to SimpleParser if a tokenizer is not supported.
if not self.lingual_parser.has_tokenizer_support():
self.lingual_parser = SimpleParser()
if self.lingual and not self.lingual_parser.has_NLP_support():
logger.warning(
f"Lingual mode will be turned off, "
f"as spacy doesn't provide support for this "
f"language ({self.language})"
)
self.lingual = False
# tabular setup
self.tabular = tabular
# visual setup
self.visual_parser = visual_parser
def apply( # type: ignore
self, document: Document, **kwargs: Any
) -> Optional[Document]:
"""Parse a text in an instance of Document.
:param document: document to parse.
"""
try:
[y for y in self.parse(document, document.text)]
if self.visual_parser:
if not self.visual_parser.is_parsable(document.name):
warnings.warn(
(
f"Visual parse failed. "
f"{document.name} not a PDF. "
f"Proceeding without visual parsing."
),
RuntimeWarning,
)
else:
# Add visual attributes
[
y
for y in self.visual_parser.parse(
document.name, document.sentences
)
]
return document
except Exception as e:
logging.exception(
(
f"Document {document.name} not added to database, "
f"because of parse error: \n{e}"
)
)
return None
def _parse_table(self, node: HtmlElement, state: Dict[str, Any]) -> Dict[str, Any]:
"""Parse a table node.
:param node: The lxml table node to parse
:param state: The global state necessary to place the node in context
of the document as a whole.
"""
if not self.tabular:
logger.error("Called _parse_table without tabular activated.")
return state
if node.tag == "table":
table_idx = state["table"]["idx"]
stable_id = f"{state['document'].name}::{'table'}:{state['table']['idx']}"
# Set name for Table
name = node.attrib["name"] if "name" in node.attrib else None
# Create the Table in the DB
parts = {}
parts["document"] = state["document"]
parts["stable_id"] = stable_id
parts["name"] = name
parts["position"] = table_idx
parent = state["parent"][node]
if isinstance(parent, Cell):
parts["section"] = parent.table.section
elif isinstance(parent, Section):
parts["section"] = parent
else:
raise NotImplementedError("Table is not within a Section or Cell")
state["context"][node] = Table(**parts)
# Local state for each table. This is required to support nested
# tables
state["table"][table_idx] = {
"grid": defaultdict(int),
"cell_pos": 0,
"row_idx": -1,
"col_idx": 0,
}
# Increment table counter
state["table"]["idx"] += 1
elif node.tag == "tr":
if not isinstance(state["parent"][node], Table):
raise NotImplementedError("Table row parent must be a Table.")
state["table"][state["parent"][node].position]["col_idx"] = 0
state["table"][state["parent"][node].position]["row_idx"] += 1
elif node.tag in ["td", "th"]:
if not isinstance(state["parent"][node], Table):
raise NotImplementedError("Cell parent must be a Table.")
if not state["table"][state["parent"][node].position]["row_idx"] >= 0:
raise NotImplementedError("Table cell encountered before a table row.")
# calculate row_start/col_start
while state["table"][state["parent"][node].position]["grid"][
(
state["table"][state["parent"][node].position]["row_idx"],
state["table"][state["parent"][node].position]["col_idx"],
)
]: # while a cell on the grid is occupied, keep moving
state["table"][state["parent"][node].position]["col_idx"] += 1
col_start = state["table"][state["parent"][node].position]["col_idx"]
row_start = state["table"][state["parent"][node].position]["row_idx"]
# calculate row_end/col_end
row_end = row_start
if "rowspan" in node.attrib:
try:
row_end += int(node.get("rowspan")) - 1
except ValueError:
logger.error(f"Rowspan has invalid value: '{node.get('rowspan')}'")
col_end = col_start
if "colspan" in node.attrib:
try:
col_end += int(node.get("colspan")) - 1
except ValueError:
logger.error(f"Colspan has invalid value: '{node.get('colspan')}'")
# update grid with occupied cells
for r, c in itertools.product(
list(range(row_start, row_end + 1)), list(range(col_start, col_end + 1))
):
state["table"][state["parent"][node].position]["grid"][(r, c)] = 1
# Set name for Cell
name = node.attrib["name"] if "name" in node.attrib else None
# construct cell
parts = defaultdict(list)
parts["document"] = state["document"]
parts["name"] = name
parts["table"] = state["parent"][node]
parts["row_start"] = row_start
parts["row_end"] = row_end
parts["col_start"] = col_start
parts["col_end"] = col_end
parts["position"] = state["table"][state["parent"][node].position][
"cell_pos"
]
stable_id = (
f"{parts['document'].name}"
f"::"
f"{'cell'}"
f":"
f"{parts['table'].position}"
f":"
f"{row_start}"
f":"
f"{col_start}"
)
parts["stable_id"] = stable_id
# Create the Cell in the DB
state["context"][node] = Cell(**parts)
# Update position
state["table"][state["parent"][node].position]["col_idx"] += 1
state["table"][state["parent"][node].position]["cell_pos"] += 1
return state
def _parse_figure(self, node: HtmlElement, state: Dict[str, Any]) -> Dict[str, Any]:
"""Parse the figure node.
:param node: The lxml img node to parse
:param state: The global state necessary to place the node in context
of the document as a whole.
"""
if node.tag not in ["img", "figure"]:
return state
# Process the Figure
stable_id = (
f"{state['document'].name}"
f"::"
f"{'figure'}"
f":"
f"{state['figure']['idx']}"
)
# Set name for Figure
name = node.attrib["name"] if "name" in node.attrib else None
        # An img within a Figure gets processed in the parent Figure
if node.tag == "img" and isinstance(state["parent"][node], Figure):
return state
# NOTE: We currently do NOT support nested figures.
parts: Dict[str, Any] = {}
parent = state["parent"][node]
if isinstance(parent, Section):
parts["section"] = parent
elif isinstance(parent, Cell):
parts["section"] = parent.table.section
parts["cell"] = parent
else:
logger.warning(f"Figure is nested within {state['parent'][node]}")
return state
parts["document"] = state["document"]
parts["stable_id"] = stable_id
parts["name"] = name
parts["position"] = state["figure"]["idx"]
# If processing a raw img
if node.tag == "img":
# Create the Figure entry in the DB
parts["url"] = node.get("src")
state["context"][node] = Figure(**parts)
elif node.tag == "figure":
# Pull the image from a child img node, if one exists
imgs = [child for child in node if child.tag == "img"]
# In case the image from the child img node doesn't exist
if len(imgs) == 0:
logger.warning("No image found in Figure.")
return state
if len(imgs) > 1:
logger.warning("Figure contains multiple images.")
# Right now we don't support multiple URLs in the Figure context
# As a workaround, just ignore the outer Figure and allow processing
# of the individual images. We ignore the accompanying figcaption
# by marking it as visited.
for child in node:
if child.tag == "figcaption":
child.set("visited", "true")
return state
img = imgs[0]
img.set("visited", "true")
# Create the Figure entry in the DB
parts["url"] = img.get("src")
state["context"][node] = Figure(**parts)
state["figure"]["idx"] += 1
return state
def _parse_sentence(
self, paragraph: Paragraph, node: HtmlElement, state: Dict[str, Any]
) -> Iterator[Sentence]:
"""Parse the Sentences of the node.
:param node: The lxml node to parse
:param state: The global state necessary to place the node in context
of the document as a whole.
"""
text = state["paragraph"]["text"]
field = state["paragraph"]["field"]
# Set name for Sentence
name = node.attrib["name"] if "name" in node.attrib else None
# Lingual Parse
document = state["document"]
for parts in self.lingual_parser.split_sentences(text):
abs_offset = state["sentence"]["abs_offset"]
parts["abs_char_offsets"] = [
char_offset + abs_offset for char_offset in parts["char_offsets"]
]
parts["document"] = document
# NOTE: Why do we overwrite this from the spacy parse?
parts["position"] = state["sentence"]["idx"]
abs_sentence_offset_end = (
state["sentence"]["abs_offset"]
+ parts["char_offsets"][-1]
+ len(parts["words"][-1])
)
parts["stable_id"] = construct_stable_id(
document,
"sentence",
state["sentence"]["abs_offset"],
abs_sentence_offset_end,
)
parts["name"] = name
state["sentence"]["abs_offset"] = abs_sentence_offset_end
if self.structural:
context_node = node.getparent() if field == "tail" else node
tree = lxml.etree.ElementTree(state["root"])
parts["xpath"] = tree.getpath(context_node)
parts["html_tag"] = context_node.tag
parts["html_attrs"] = [
"=".join(x)
for x in context_node.attrib.items()
if x[0] != "visited"
]
# Extending html style attribute with the styles
# from inline style class for the element.
cur_style_index = None
for index, attr in enumerate(parts["html_attrs"]):
if attr.find("style") >= 0:
cur_style_index = index
break
head = state["root"].find("head")
styles = None
if head is not None:
styles = head.find("style")
if styles is not None:
for x in list(context_node.attrib.items()):
if x[0] == "class":
exp = r"(." + x[1] + r")([\n\s\r]*)\{(.*?)\}"
r = re.compile(exp, re.DOTALL)
if r.search(styles.text) is not None:
if cur_style_index is not None:
parts["html_attrs"][cur_style_index] += (
r.search(styles.text)
.group(3)
.replace("\r", "")
.replace("\n", "")
.replace("\t", "")
)
else:
parts["html_attrs"].extend(
[
"style="
+ re.sub(
r"\s{1,}",
" ",
r.search(styles.text)
.group(3)
.replace("\r", "")
.replace("\n", "")
.replace("\t", "")
.strip(),
)
]
)
break
parts["position"] = state["sentence"]["idx"]
# If tabular, consider own Context first in case a Cell
# was just created. Otherwise, defer to the parent.
parent = paragraph
if isinstance(parent, Paragraph):
parts["section"] = parent.section
parts["paragraph"] = parent
if parent.cell: # if True self.tabular is also always True
parts["table"] = parent.cell.table
parts["cell"] = parent.cell
parts["row_start"] = parent.cell.row_start
parts["row_end"] = parent.cell.row_end
parts["col_start"] = parent.cell.col_start
parts["col_end"] = parent.cell.col_end
else:
raise NotImplementedError("Sentence parent must be Paragraph.")
yield Sentence(**parts)
state["sentence"]["idx"] += 1
def _parse_paragraph(
self, node: HtmlElement, state: Dict[str, Any]
) -> Iterator[Sentence]:
"""Parse a Paragraph of the node.
:param node: The lxml node to parse
:param state: The global state necessary to place the node in context
of the document as a whole.
"""
# Both Paragraphs will share the same parent
parent = (
state["context"][node]
if node in state["context"]
else state["parent"][node]
)
# Set name for Paragraph
name = node.attrib["name"] if "name" in node.attrib else None
if len(node.getchildren()) == 0: # leaf node
fields = ["text", "tail"]
elif node.get("visited") == "text": # .text was parsed already
fields = ["tail"]
node.set("visited", "true")
else:
fields = ["text"]
node.set("visited", "text")
self.stack.append(node) # will visit again later for tail
for field in fields:
text = getattr(node, field)
text = text.strip() if text and self.strip else text
# Skip if "" or None
if not text:
continue
# Run RegEx replacements
for (rgx, replace) in self.replacements:
text = rgx.sub(replace, text)
# Process the Paragraph
stable_id = (
f"{state['document'].name}"
f"::"
f"{'paragraph'}"
f":"
f"{state['paragraph']['idx']}"
)
parts = {}
parts["stable_id"] = stable_id
parts["name"] = name
parts["document"] = state["document"]
parts["position"] = state["paragraph"]["idx"]
if isinstance(parent, Caption):
if parent.table:
parts["section"] = parent.table.section
elif parent.figure:
parts["section"] = parent.figure.section
parts["caption"] = parent
elif isinstance(parent, Cell):
parts["section"] = parent.table.section
parts["cell"] = parent
elif isinstance(parent, Section):
parts["section"] = parent
elif isinstance(parent, Figure): # occurs with text in the tail of an img
parts["section"] = parent.section
elif isinstance(parent, Table): # occurs with text in the tail of a table
parts["section"] = parent.section
else:
raise NotImplementedError(
f"Para '{text}' parent must be Section, Caption, or Cell, "
f"not {parent}"
)
# Create the entry in the DB
paragraph = Paragraph(**parts)
state["paragraph"]["idx"] += 1
state["paragraph"]["text"] = text
state["paragraph"]["field"] = field
yield from self._parse_sentence(paragraph, node, state)
def _parse_section(
self, node: HtmlElement, state: Dict[str, Any]
) -> Dict[str, Any]:
"""Parse a Section of the node.
Note that this implementation currently creates a Section at the
beginning of the document and creates Section based on tag of node.
:param node: The lxml node to parse
:param state: The global state necessary to place the node in context
of the document as a whole.
"""
if node.tag not in ["html", "section"]:
return state
# Add a Section
stable_id = (
f"{state['document'].name}"
f"::"
f"{'section'}"
f":"
f"{state['section']['idx']}"
)
# Set name for Section
name = node.attrib["name"] if "name" in node.attrib else None
state["context"][node] = Section(
document=state["document"],
name=name,
stable_id=stable_id,
position=state["section"]["idx"],
)
state["section"]["idx"] += 1
return state
def _parse_caption(
self, node: HtmlElement, state: Dict[str, Any]
) -> Dict[str, Any]:
"""Parse a Caption of the node.
:param node: The lxml node to parse
:param state: The global state necessary to place the node in context
of the document as a whole.
"""
if node.tag not in ["caption", "figcaption"]: # captions used in Tables
return state
# Add a Caption
parent = state["parent"][node]
stable_id = (
f"{state['document'].name}"
f"::"
f"{'caption'}"
f":"
f"{state['caption']['idx']}"
)
        # Set name for Caption
name = node.attrib["name"] if "name" in node.attrib else None
if isinstance(parent, Table):
state["context"][node] = Caption(
document=state["document"],
table=parent,
figure=None,
stable_id=stable_id,
name=name,
position=state["caption"]["idx"],
)
elif isinstance(parent, Figure):
state["context"][node] = Caption(
document=state["document"],
table=None,
figure=parent,
stable_id=stable_id,
name=name,
position=state["caption"]["idx"],
)
else:
raise NotImplementedError("Caption must be a child of Table or Figure.")
state["caption"]["idx"] += 1
return state
def _parse_node(
self, node: HtmlElement, state: Dict[str, Any]
) -> Iterator[Sentence]:
"""Entry point for parsing all node types.
:param node: The lxml HTML node to parse
:param state: The global state necessary to place the node in context
of the document as a whole.
:return: a *generator* of Sentences
"""
# Processing on entry of node
if node.get("visited") != "text": # skip when .text has been parsed
state = self._parse_section(node, state)
state = self._parse_figure(node, state)
if self.tabular:
state = self._parse_table(node, state)
state = self._parse_caption(node, state)
yield from self._parse_paragraph(node, state)
def parse(self, document: Document, text: str) -> Iterator[Sentence]:
"""Depth-first search over the provided tree.
Implemented as an iterative procedure. The structure of the state
needed to parse each node is also defined in this function.
:param document: the Document context
:param text: the structured text of the document (e.g. HTML)
:return: a *generator* of Sentences.
"""
self.stack = []
root = lxml.html.fromstring(text)
# flattens children of node that are in the 'flatten' list
if self.flatten:
lxml.etree.strip_tags(root, self.flatten)
# Strip comments
lxml.etree.strip_tags(root, lxml.etree.Comment)
# Assign the text, which was stripped of the 'flatten'-tags, to the document
document.text = lxml.etree.tostring(root, encoding="unicode")
# This dictionary contain the global state necessary to parse a
# document and each context element. This reflects the relationships
# defined in parser/models. This contains the state necessary to create
# the respective Contexts within the document.
state = {
"parent": {}, # map of parent[child] = node used to discover child
"context": {}, # track the Context of each node (context['td'] = Cell)
"root": root,
"document": document,
"section": {"idx": 0},
"paragraph": {"idx": 0},
"figure": {"idx": 0},
"caption": {"idx": 0},
"table": {"idx": 0},
"sentence": {"idx": 0, "abs_offset": 0},
}
# NOTE: Currently the helper functions directly manipulate the state
# rather than returning a modified copy.
# Iterative Depth-First Search
self.stack.append(root)
state["parent"][root] = document
state["context"][root] = document
tokenized_sentences: List[Sentence] = []
while self.stack:
node = self.stack.pop()
if node.get("visited") != "true":
# Process
if self.lingual:
tokenized_sentences += [y for y in self._parse_node(node, state)]
else:
yield from self._parse_node(node, state)
# NOTE: This reversed() order is to ensure that the iterative
# DFS matches the order that would be produced by a recursive
# DFS implementation.
if node.get("visited") != "true":
for child in reversed(node):
# Skip nodes that are blacklisted
if self.blacklist and child.tag in self.blacklist:
continue
self.stack.append(child)
# store the parent of the node, which is either the parent
# Context, or if the parent did not create a Context, then
# use the node's parent Context.
state["parent"][child] = (
state["context"][node]
if node in state["context"]
else state["parent"][node]
)
else:
node.set("visited", "true") # mark as visited
if self.lingual:
yield from self.lingual_parser.enrich_sentences_with_NLP(
tokenized_sentences
)
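# Usage sketch (illustrative, not part of the original source): ``parse`` is a
# generator, so Sentences are produced lazily while the HTML tree is walked.
# Here ``parser_udf`` stands for an instance of the class defined above,
# ``document`` for a previously created Document, and ``html_text`` for the
# raw HTML string of that document (all hypothetical names).
#
#   for sentence in parser_udf.parse(document, html_text):
#       print(sentence.position, sentence.text)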
| fonduer-master | src/fonduer/parser/parser.py |
"""Fonduer document preprocessor."""
import glob
import os
import sys
from typing import Iterator, List
from fonduer.parser.models.document import Document
class DocPreprocessor(object):
"""An abstract class of a ``Document`` generator.
Unless otherwise stated by a subclass, it's assumed that there is one ``Document``
per file.
"""
def __init__(
self, path: str, encoding: str = "utf-8", max_docs: int = sys.maxsize
) -> None:
"""Initialize DocPreprocessor.
:param path: a path to file or directory, or a glob pattern. The basename
(as returned by ``os.path.basename``) should be unique among all files.
:param encoding: file encoding to use, defaults to "utf-8".
:param max_docs: the maximum number of ``Documents`` to produce,
defaults to sys.maxsize.
:return: A generator of ``Documents``.
"""
self.path = path
self.encoding = encoding
self.max_docs = max_docs
self.all_files = self._get_files(self.path)
def _generate(self) -> Iterator[Document]:
"""Parse a file or directory of files into a set of ``Document`` objects."""
doc_count = 0
for fp in self.all_files:
for doc in self._get_docs_for_path(fp):
yield doc
doc_count += 1
if doc_count >= self.max_docs:
return
def __len__(self) -> int:
"""Get total number of documents."""
raise NotImplementedError(
"One generic file can yield more than one Document object, "
"so length can not be yielded before we process all files"
)
def __iter__(self) -> Iterator[Document]:
"""Get the generator of documents."""
return self._generate()
def _get_docs_for_path(self, fp: str) -> Iterator[Document]:
file_name = os.path.basename(fp)
if self._can_read(file_name):
for doc in self._parse_file(fp, file_name):
yield doc
def _get_stable_id(self, doc_id: str) -> str:
return f"{doc_id}::document:0:0"
def _parse_file(self, fp: str, file_name: str) -> Iterator[Document]:
raise NotImplementedError()
def _can_read(self, fpath: str) -> bool:
return not fpath.startswith(".")
def _get_files(self, path: str) -> List[str]:
if os.path.isfile(path):
fpaths = [path]
elif os.path.isdir(path):
fpaths = [os.path.join(path, f) for f in os.listdir(path)]
else:
fpaths = glob.glob(path)
fpaths = [x for x in fpaths if self._can_read(x)]
if len(fpaths) > 0:
return sorted(fpaths)
else:
raise IOError(
f"No readable file found at {path}. Check if {path} exists "
"and a filename ends with an allowable extension (e.g., .html)"
)
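# Sketch of a minimal concrete subclass (illustrative, not part of the
# original source): a subclass only needs to implement ``_parse_file`` (and,
# when a cheap count is possible, ``__len__``), yielding ``Document`` objects.
#
#   class PlainTextPreprocessor(DocPreprocessor):
#       def _parse_file(self, fp, file_name):
#           with open(fp, encoding=self.encoding) as f:
#               name = os.path.basename(fp).rsplit(".", 1)[0]
#               yield Document(
#                   name=name,
#                   stable_id=self._get_stable_id(name),
#                   text=f.read(),
#                   meta={"file_name": file_name},
#               )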
| fonduer-master | src/fonduer/parser/preprocessors/doc_preprocessor.py |
"""Fonduer hOCR document preprocessor."""
import codecs
import os
import re
import sys
from typing import Iterator, Optional, Tuple
from bs4 import BeautifulSoup
from bs4.element import Comment, NavigableString, Tag
from fonduer.parser.models import Document
from fonduer.parser.preprocessors.doc_preprocessor import DocPreprocessor
class HOCRDocPreprocessor(DocPreprocessor):
"""A ``Document`` generator for hOCR files.
hOCR should comply with `hOCR v1.2`_.
    Note that the *ppageno* property of *ocr_page* is optional in `hOCR v1.2`_,
    but is required by Fonduer.
.. _hOCR v1.2: http://kba.cloud/hocr-spec/1.2/
"""
def __init__(
self,
path: str,
encoding: str = "utf-8",
max_docs: int = sys.maxsize,
space: bool = True,
):
"""Initialize HOCRDocPreprocessor.
:param path: a path to file or directory, or a glob pattern. The basename
(as returned by ``os.path.basename``) should be unique among all files.
:param encoding: file encoding to use, defaults to "utf-8".
:param max_docs: the maximum number of ``Documents`` to produce,
defaults to sys.maxsize.
:param space: boolean value indicating whether each word should have a
subsequent space. E.g., English has spaces between words.
:return: A generator of ``Documents``.
"""
super().__init__(path, encoding, max_docs)
self.space = space
def _parse_file(self, fp: str, file_name: str) -> Iterator[Document]:
# Adapted from https://github.com/ocropus/hocr-tools/blob/v1.3.0/hocr-check
def get_prop(node: Tag, name: str) -> Optional[str]:
title = node["title"]
if not title:
return None
props = title.split(";")
for prop in props:
(key, args) = prop.split(None, 1)
if key == name:
return args
return None
# Adapted from https://github.com/ocropus/hocr-tools/blob/v1.3.0/hocr-check
        def get_bbox(node: Tag) -> Optional[Tuple[str, ...]]:
bbox = get_prop(node, "bbox")
if not bbox:
return None
return tuple([x for x in bbox.split()])
with codecs.open(fp, encoding=self.encoding) as f:
soup = BeautifulSoup(f, "lxml")
all_html_elements = soup.find_all("html")
if len(all_html_elements) != 1:
raise NotImplementedError(
f"Expecting exactly one html element per html file: {file_name}"
)
root = all_html_elements[0]
capabilities = root.find("meta", attrs={"name": "ocr-capabilities"})
if capabilities is None:
raise RuntimeError(
"The input hOCR does not contain ocr-capabilities metadata."
)
# Unwrap ocr_line/ocrx_line as Fonduer has no data model for lines.
if "ocr_line" in capabilities["content"]:
for line in root.find_all(class_="ocr_line"):
line.unwrap()
if "ocrx_line" in capabilities["content"]:
for line in root.find_all(class_="ocrx_line"):
line.unwrap()
if "ocrx_word" in capabilities["content"]:
for p, page in enumerate(root.find_all(class_="ocr_page")):
ppageno = str(p) # 0-based
for word in page.find_all(class_="ocrx_word"):
parent = word.parent
(left, top, right, bottom) = get_bbox(word)
# ocrx_word could have multiple words with one or more of spaces
# in-between. This actually happens on Tesseract 4.00.
# This is normalized by splitting and concatenating later.
tokens = word.text.split()
if "left" not in parent.attrs:
parent["left"] = []
parent["top"] = []
parent["right"] = []
parent["bottom"] = []
parent["ppageno"] = []
parent["tokens"] = []
parent["left"] += [left] * len(tokens)
parent["top"] += [top] * len(tokens)
parent["right"] += [right] * len(tokens)
parent["bottom"] += [bottom] * len(tokens)
parent["ppageno"] += [ppageno] * len(tokens)
parent["tokens"] += tokens
if "ocrp_wconf" in capabilities["content"]:
x_wconf = get_prop(word, "x_wconf")
if "x_wconf" not in parent.attrs:
parent["x_wconf"] = []
parent["x_wconf"].append(x_wconf)
# Mark the parent element
if "fonduer" not in parent.attrs:
parent["fonduer"] = ["1"]
# Concat words again with " " or "".
if len(tokens) > 1:
if self.space:
word.string.replace_with(" ".join(tokens))
else:
word.string.replace_with("".join(tokens))
word.unwrap()
# Clean-up
for i, parent in enumerate(root.find_all(attrs={"fonduer": "1"})):
# Concat consecutive NavigableString
parent.smooth() # beautifulsoup4 >= 4.8.0
# Remove linebreaks and excess spaces
# in reverse order b/c removing element from list in loop
for child in reversed(parent.contents):
if isinstance(child, Comment): # remove comments
child.extract()
elif isinstance(child, NavigableString):
if child.strip() == "": # remove if space or linebreak
child.extract()
else:
tmp = re.sub(r"[\n\s]+", " " if self.space else "", child)
n = NavigableString(tmp.strip())
child.replace_with(n)
del parent["fonduer"]
name = os.path.basename(fp)[: os.path.basename(fp).rfind(".")]
stable_id = self._get_stable_id(name)
yield Document(
name=name,
stable_id=stable_id,
text=str(root),
meta={"file_name": file_name},
)
def __len__(self) -> int:
"""Provide a len attribute based on max_docs and number of files in folder."""
num_docs = min(len(self.all_files), self.max_docs)
return num_docs
def _can_read(self, fpath: str) -> bool:
"""Return True if the path ends with either 'html' or 'hocr'."""
return fpath.lower().endswith("hocr") or fpath.lower().endswith("html")
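# Usage sketch (illustrative, not part of the original source; the path is
# hypothetical). One Document is yielded per hOCR file, with word-level
# bounding boxes folded into the attributes of the words' parent elements:
#
#   preprocessor = HOCRDocPreprocessor("data/hocr/", space=True)
#   for doc in preprocessor:
#       print(doc.name, doc.meta["file_name"])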
| fonduer-master | src/fonduer/parser/preprocessors/hocr_doc_preprocessor.py |
"""Fonduer's parser preprocessor module."""
from fonduer.parser.preprocessors.csv_doc_preprocessor import CSVDocPreprocessor
from fonduer.parser.preprocessors.doc_preprocessor import DocPreprocessor
from fonduer.parser.preprocessors.hocr_doc_preprocessor import HOCRDocPreprocessor
from fonduer.parser.preprocessors.html_doc_preprocessor import HTMLDocPreprocessor
from fonduer.parser.preprocessors.text_doc_preprocessor import TextDocPreprocessor
from fonduer.parser.preprocessors.tsv_doc_preprocessor import TSVDocPreprocessor
__all__ = [
"CSVDocPreprocessor",
"DocPreprocessor",
"HOCRDocPreprocessor",
"HTMLDocPreprocessor",
"TSVDocPreprocessor",
"TextDocPreprocessor",
]
| fonduer-master | src/fonduer/parser/preprocessors/__init__.py |
"""Fonduer HTML document preprocessor."""
import codecs
import os
from typing import Iterator
from bs4 import BeautifulSoup
from fonduer.parser.models import Document
from fonduer.parser.preprocessors.doc_preprocessor import DocPreprocessor
class HTMLDocPreprocessor(DocPreprocessor):
"""A ``Document`` generator for HTML files."""
def _parse_file(self, fp: str, file_name: str) -> Iterator[Document]:
with codecs.open(fp, encoding=self.encoding) as f:
soup = BeautifulSoup(f, "lxml")
all_html_elements = soup.find_all("html")
if len(all_html_elements) != 1:
raise NotImplementedError(
f"Expecting exactly one html element per html file: {file_name}"
)
text = all_html_elements[0]
name = os.path.basename(fp)[: os.path.basename(fp).rfind(".")]
stable_id = self._get_stable_id(name)
yield Document(
name=name,
stable_id=stable_id,
text=str(text),
meta={"file_name": file_name},
)
def __len__(self) -> int:
"""Provide a len attribute based on max_docs and number of files in folder."""
num_docs = min(len(self.all_files), self.max_docs)
return num_docs
def _can_read(self, fpath: str) -> bool:
return fpath.lower().endswith("html") # includes both .html and .xhtml
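# Usage sketch (illustrative, not part of the original source; the path is
# hypothetical):
#
#   preprocessor = HTMLDocPreprocessor("data/html/", max_docs=10)
#   print(len(preprocessor))  # bounded by max_docs
#   for doc in preprocessor:
#       print(doc.name)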
| fonduer-master | src/fonduer/parser/preprocessors/html_doc_preprocessor.py |
"""Fonduer text document preprocessor."""
import codecs
import os
from typing import Iterator
from fonduer.parser.models import Document
from fonduer.parser.preprocessors.doc_preprocessor import DocPreprocessor
from fonduer.utils.utils_parser import build_node
class TextDocPreprocessor(DocPreprocessor):
"""A ``Document`` generator for plain text files."""
def _parse_file(self, fp: str, file_name: str) -> Iterator[Document]:
with codecs.open(fp, encoding=self.encoding) as f:
name = os.path.basename(fp).rsplit(".", 1)[0]
stable_id = self._get_stable_id(name)
text = build_node("doc", None, build_node("text", None, f.read().strip()))
yield Document(
name=name, stable_id=stable_id, text=text, meta={"file_name": file_name}
)
def __len__(self) -> int:
"""Provide a len attribute based on max_docs and number of files in folder."""
num_docs = min(len(self.all_files), self.max_docs)
return num_docs
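# Usage sketch (illustrative, not part of the original source; the path is
# hypothetical). Each plain-text file is wrapped via ``build_node`` as
# <doc><text>...</text></doc> so it can be handled by the HTML-oriented parser:
#
#   preprocessor = TextDocPreprocessor("data/txt/")
#   for doc in preprocessor:
#       print(doc.name, doc.text[:40])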
| fonduer-master | src/fonduer/parser/preprocessors/text_doc_preprocessor.py |
"""Fonduer CSV document preprocessor."""
import codecs
import csv
import os
import sys
from typing import Callable, Dict, Iterator, Optional
from fonduer.parser.models import Document
from fonduer.parser.preprocessors.doc_preprocessor import DocPreprocessor
from fonduer.utils.utils_parser import build_node, column_constructor
class CSVDocPreprocessor(DocPreprocessor):
"""A ``Document`` generator for CVS files.
It treats each line in the input file as a ``Document``.
It assumes that each column is one ``Section`` and content in each column as one
``Paragraph`` by default. However, if the column is complex, an advanced parser
may be used by specifying ``parser_rule`` parameter in a dict format where key
is the column index and value is the specific parser, e,g., ``column_constructor``
in ``fonduer.utils.utils_parser``.
"""
def __init__(
self,
path: str,
encoding: str = "utf-8",
max_docs: int = sys.maxsize,
header: bool = False,
delim: str = ",",
parser_rule: Optional[Dict[int, Callable]] = None,
) -> None:
"""Initialize CSV DocPreprocessor.
:param path: a path to file or directory, or a glob pattern. The basename
(as returned by ``os.path.basename``) should be unique among all files.
:param encoding: file encoding to use (e.g. "utf-8").
:param max_docs: the maximum number of ``Documents`` to produce.
        :param header: whether the CSV file contains a header or not; if yes, the
            header will be used as the Section name. default = False
        :param delim: delimiter to be used to separate columns when the file has
            more than one column. default=','
:param parser_rule: The parser rule to be used to parse the specific column.
default = None
:return: A generator of ``Documents``.
"""
super().__init__(path, encoding, max_docs)
self.header = header
self.delim = delim
self.parser_rule = parser_rule
def _parse_file(self, fp: str, file_name: str) -> Iterator[Document]:
name = os.path.basename(fp)[: os.path.basename(fp).rfind(".")]
with codecs.open(fp, encoding=self.encoding) as f:
reader = csv.reader(f)
# Load CSV header
header_names = None
if self.header:
header_names = next(reader)
# Load document per row
for i, row in enumerate(reader):
sections = []
for j, content in enumerate(row):
rule = (
self.parser_rule[j]
if self.parser_rule is not None and j in self.parser_rule
else column_constructor
)
content_header = (
header_names[j] if header_names is not None else None
)
context = [
build_node(t, n, c)
# TODO: Fix this type ignore
for t, n, c in rule(content) # type: ignore
]
sections.append(
build_node("section", content_header, "".join(context))
)
text = build_node("doc", None, "".join(sections))
doc_name = name + ":" + str(i)
stable_id = self._get_stable_id(doc_name)
yield Document(
name=doc_name,
stable_id=stable_id,
text=text,
meta={"file_name": file_name},
)
def __len__(self) -> int:
"""Provide a len attribute based on max_docs and number of rows in files."""
cnt_docs = 0
for fp in self.all_files:
            with codecs.open(fp, encoding=self.encoding) as csv_file:
                num_lines = sum(1 for line in csv_file)
cnt_docs += num_lines - 1 if self.header else num_lines
if cnt_docs > self.max_docs:
break
num_docs = min(cnt_docs, self.max_docs)
return num_docs
def _can_read(self, fpath: str) -> bool:
return fpath.lower().endswith(".csv")
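# Usage sketch (illustrative, not part of the original source; the path and
# column index are hypothetical). Each CSV row becomes one Document named
# "<file name>:<row index>"; ``parser_rule`` maps a column index to a custom
# parser such as ``column_constructor``:
#
#   preprocessor = CSVDocPreprocessor(
#       "data/docs.csv",
#       header=True,
#       parser_rule={1: column_constructor},
#   )
#   docs = list(preprocessor)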
| fonduer-master | src/fonduer/parser/preprocessors/csv_doc_preprocessor.py |
"""Fonduer TSV document preprocessor."""
import codecs
import sys
from typing import Iterator
from fonduer.parser.models import Document
from fonduer.parser.preprocessors.doc_preprocessor import DocPreprocessor
from fonduer.utils.utils_parser import build_node
class TSVDocPreprocessor(DocPreprocessor):
"""A ``Document`` generator for TSV files.
It treats each line in the input file as a ``Document``.
The TSV file should have one (doc_name <tab> doc_text) per line.
"""
def __init__(
self,
path: str,
encoding: str = "utf-8",
max_docs: int = sys.maxsize,
header: bool = False,
) -> None:
"""Initialize TSV DocPreprocessor.
:param path: a path to file or directory, or a glob pattern. The basename
(as returned by ``os.path.basename``) should be unique among all files.
:param encoding: file encoding to use (e.g. "utf-8").
:param max_docs: the maximum number of ``Documents`` to produce.
        :param header: whether the TSV file contains a header or not. default = False
:return: A generator of ``Documents``.
"""
super().__init__(path, encoding, max_docs)
self.header = header
def _parse_file(self, fp: str, file_name: str) -> Iterator[Document]:
with codecs.open(fp, encoding=self.encoding) as tsv:
if self.header:
tsv.readline()
for line in tsv:
(doc_name, doc_text) = line.split("\t")
stable_id = self._get_stable_id(doc_name)
text = build_node("doc", None, build_node("text", None, doc_text))
yield Document(
name=doc_name,
stable_id=stable_id,
text=text,
meta={"file_name": file_name},
)
def __len__(self) -> int:
"""Provide a len attribute based on max_docs and number of rows in files."""
cnt_docs = 0
for fp in self.all_files:
with codecs.open(fp, encoding=self.encoding) as tsv:
num_lines = sum(1 for line in tsv)
cnt_docs += num_lines - 1 if self.header else num_lines
if cnt_docs > self.max_docs:
break
num_docs = min(cnt_docs, self.max_docs)
return num_docs
def _can_read(self, fpath: str) -> bool:
return fpath.lower().endswith(".tsv")
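# Usage sketch (illustrative, not part of the original source; the path is
# hypothetical). Each "doc_name<TAB>doc_text" line becomes one Document:
#
#   preprocessor = TSVDocPreprocessor("data/docs.tsv", header=False)
#   for doc in preprocessor:
#       print(doc.name)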
| fonduer-master | src/fonduer/parser/preprocessors/tsv_doc_preprocessor.py |
"""A simple alternative tokenizer which parses text by splitting on whitespace."""
from typing import Any, Dict, Iterator
import numpy as np
from fonduer.parser.lingual_parser.lingual_parser import LingualParser
class SimpleParser(LingualParser):
"""Tokenizes text on whitespace only using split().
:param delim: a delimiter to split text into sentences.
"""
def __init__(self, delim: str = ".") -> None:
"""Initialize SimpleParser."""
self.delim = delim
    def split_sentences(self, text: str) -> Iterator[Dict[str, Any]]:
        """Parse the document.
        :param text: The text contents of the document.
        :return: a *generator* of tokenized text.
        """
        for sent_text in text.split(self.delim):
            if not len(sent_text.strip()):
                continue
            words = sent_text.split()
            char_offsets = [0] + [
                int(_) for _ in np.cumsum([len(x) + 1 for x in words])[:-1]
            ]
            sent_text = " ".join(words)
            yield {
                "text": sent_text,
                "words": words,
                "pos_tags": [""] * len(words),
                "ner_tags": [""] * len(words),
                "lemmas": [""] * len(words),
                "dep_parents": [0] * len(words),
                "dep_labels": [""] * len(words),
                "char_offsets": char_offsets,
                "abs_char_offsets": char_offsets,
            }
def has_NLP_support(self) -> bool:
"""
Return True when NLP is supported.
:return: True when NLP is supported.
"""
return False
def has_tokenizer_support(self) -> bool:
"""
Return True when a tokenizer is supported.
:return: True when a tokenizer is supported.
"""
return True
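# Usage sketch (illustrative, not part of the original source). SimpleParser
# only tokenizes on whitespace; the NLP fields it yields are empty placeholders:
#
#   parser = SimpleParser(delim=".")
#   for parts in parser.split_sentences("Hello world. Second sentence."):
#       print(parts["words"], parts["char_offsets"])
#   # -> ['Hello', 'world'] [0, 6]
#   # -> ['Second', 'sentence'] [0, 7]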
| fonduer-master | src/fonduer/parser/lingual_parser/simple_parser.py |
"""Fonduer's lingual parser module."""
from fonduer.parser.lingual_parser.lingual_parser import LingualParser
from fonduer.parser.lingual_parser.simple_parser import SimpleParser
from fonduer.parser.lingual_parser.spacy_parser import SpacyParser
__all__ = ["LingualParser", "SpacyParser", "SimpleParser"]
| fonduer-master | src/fonduer/parser/lingual_parser/__init__.py |
"""Fonduer Spacy parser."""
import importlib
import logging
from collections import defaultdict
from pathlib import Path
from string import whitespace
from typing import Any, Collection, Dict, Iterator, List, Optional
import spacy
from spacy import util
from spacy.cli import download
from spacy.language import Language
from spacy.tokens import Doc
from spacy.util import is_package
from spacy.vocab import Vocab
from fonduer.parser.lingual_parser.lingual_parser import LingualParser
from fonduer.parser.models.sentence import Sentence
logger = logging.getLogger(__name__)
class SpacyParser(LingualParser):
"""Spacy parser class.
:param lang: Language. This can be one of
``["en", "de", "es", "pt", "fr", "it", "nl", "xx", "ja", "zh"]``.
See here_ for details of languages supported by spaCy.
.. _here: https://spacy.io/usage/models#languages.
"""
languages = ["en", "de", "es", "pt", "fr", "it", "nl", "xx", "ja", "zh"]
# Keep alpha_languages for future alpha supported languages
# E.g., alpha_languages = {"ja": "Japanese", "zh": "Chinese"}
alpha_languages: Dict[str, str] = {}
def __init__(self, lang: Optional[str]) -> None:
"""Initialize SpacyParser."""
self.name = "spacy"
self.lang = lang
self.model: Optional[Language] = None
if self.has_tokenizer_support():
self._load_lang_model()
def has_tokenizer_support(self) -> bool:
"""
Return True when a tokenizer is supported.
:return: True when a tokenizer is supported.
"""
return self.lang is not None and (
self.has_NLP_support() or self.lang in self.alpha_languages
)
def has_NLP_support(self) -> bool:
"""
Return True when NLP is supported.
:return: True when NLP is supported.
"""
return self.lang is not None and (self.lang in self.languages)
@staticmethod
def model_installed(name: str) -> bool:
"""Check if spaCy language model is installed.
From https://github.com/explosion/spaCy/blob/master/spacy/util.py
:param name:
:return:
"""
data_path = util.get_data_path()
if not data_path or not data_path.exists():
raise IOError(f"Can't find spaCy data path: {data_path}")
if name in {d.name for d in data_path.iterdir()}:
return True
if is_package(name): # installed as package
return True
if Path(name).exists(): # path to model data directory
return True
return False
def _load_lang_model(self) -> None:
"""Load spaCy language model.
If a model is not installed, download it before loading it.
:return:
"""
if self.lang in self.languages:
if not SpacyParser.model_installed(self.lang):
download(self.lang)
model = spacy.load(self.lang)
elif self.lang in self.alpha_languages:
language_module = importlib.import_module(f"spacy.lang.{self.lang}")
language_method = getattr(language_module, self.alpha_languages[self.lang])
model = language_method()
self.model = model
def enrich_sentences_with_NLP(
self, sentences: Collection[Sentence]
) -> Iterator[Sentence]:
"""Enrich a list of fonduer Sentence objects with NLP features.
We merge and process the text of all Sentences for higher efficiency.
:param sentences: List of fonduer Sentence objects for one document
:return:
"""
if not self.has_NLP_support():
raise NotImplementedError(
f"Language {self.lang} not available in spacy beyond tokenization"
)
if len(sentences) == 0:
return # Nothing to parse
if self.model.has_pipe("sentencizer"):
self.model.remove_pipe("sentencizer")
logger.debug(
f"Removed sentencizer ('sentencizer') from model. "
f"Now in pipeline: {self.model.pipe_names}"
)
if self.model.has_pipe("sentence_boundary_detector"):
self.model.remove_pipe(name="sentence_boundary_detector")
self.model.add_pipe(
set_custom_boundary, before="parser", name="sentence_boundary_detector"
)
sentence_batches: List[List[Sentence]] = self._split_sentences_by_char_limit(
sentences, self.model.max_length
)
# TODO: We could do this in parallel. Test speedup in the future
for sentence_batch in sentence_batches:
custom_tokenizer = TokenPreservingTokenizer(self.model.vocab)
# we circumvent redundant tokenization by using a custom
# tokenizer that directly uses the already separated words
# of each sentence as tokens
doc = custom_tokenizer(sentence_batch)
doc.user_data = sentence_batch
for name, proc in self.model.pipeline: # iterate over components in order
doc = proc(doc)
try:
assert doc.is_parsed
except Exception:
logger.exception(f"{doc} was not parsed")
for sent, current_sentence_obj in zip(doc.sents, sentence_batch):
parts: Dict[str, Any] = defaultdict(list)
for i, token in enumerate(sent):
parts["lemmas"].append(token.lemma_)
parts["pos_tags"].append(token.tag_)
parts["ner_tags"].append(
token.ent_type_ if token.ent_type_ else "O"
)
head_idx = (
0 if token.head is token else token.head.i - sent[0].i + 1
)
parts["dep_parents"].append(head_idx)
parts["dep_labels"].append(token.dep_)
# Special case as Japanese model does not have "tagger" in pipeline
# Instead, Japanese model does tagging during tokenization.
if not self.lang == "ja":
current_sentence_obj.pos_tags = parts["pos_tags"]
current_sentence_obj.lemmas = parts["lemmas"]
current_sentence_obj.ner_tags = parts["ner_tags"]
current_sentence_obj.dep_parents = parts["dep_parents"]
current_sentence_obj.dep_labels = parts["dep_labels"]
yield current_sentence_obj
def _split_sentences_by_char_limit(
self, all_sentences: Collection[Sentence], batch_char_limit: int
) -> List[List[Sentence]]:
sentence_batches: List[List[Sentence]] = [[]]
num_chars = 0
for sentence in all_sentences:
if num_chars + len(sentence.text) >= batch_char_limit:
sentence_batches.append([sentence])
num_chars = len(sentence.text)
else:
sentence_batches[-1].append(sentence)
num_chars += len(sentence.text)
return sentence_batches
def split_sentences(self, text: str) -> Iterator[Dict[str, Any]]:
"""Split text into sentences.
Split input text into sentences that match CoreNLP's default format,
but are not yet processed.
:param text: The text of the parent paragraph of the sentences
:return:
"""
if self.model.has_pipe("sentence_boundary_detector"):
self.model.remove_pipe(name="sentence_boundary_detector")
if not self.model.has_pipe("sentencizer"):
sentencizer = self.model.create_pipe("sentencizer") # add sentencizer
self.model.add_pipe(sentencizer)
try:
doc = self.model(text, disable=["parser", "tagger", "ner"])
except ValueError:
# temporary increase character limit of spacy
            # 'Probably safe' according to spaCy, as no parser or NER is used
previous_max_length = self.model.max_length
self.model.max_length = 100_000_000
logger.warning(
f"Temporarily increased spacy maximum "
f"character limit to {self.model.max_length} to split sentences."
)
doc = self.model(text, disable=["parser", "tagger", "ner"])
self.model.max_length = previous_max_length
logger.warning(
f"Spacy maximum "
f"character limit set back to {self.model.max_length}."
)
except Exception as e:
logger.exception(e)
doc.is_parsed = True
position = 0
for sent in doc.sents:
parts: Dict[str, Any] = defaultdict(list)
for token in sent:
parts["words"].append(str(token))
parts["lemmas"].append(token.lemma_)
parts["pos_tags"].append(token.pos_)
parts["ner_tags"].append("") # placeholder for later NLP parsing
parts["char_offsets"].append(token.idx)
parts["dep_parents"].append(0) # placeholder for later NLP parsing
parts["dep_labels"].append("") # placeholder for later NLP parsing
# make char_offsets relative to start of sentence
parts["char_offsets"] = [
p - parts["char_offsets"][0] for p in parts["char_offsets"]
]
parts["position"] = position
parts["text"] = sent.text
position += 1
yield parts
def set_custom_boundary(doc: Doc) -> Doc:
"""Set the boundaries of sentence.
Set the sentence boundaries based on the already separated sentences.
:param doc: doc.user_data should have a list of Sentence.
:return doc:
"""
if doc.user_data == {}:
raise AttributeError("A list of Sentence is not attached to doc.user_data.")
# Set every token.is_sent_start False because they are all True by default
for token_nr, token in enumerate(doc):
doc[token_nr].is_sent_start = False
# Set token.is_sent_start True when it is the first token of a Sentence
token_nr = 0
for sentence in doc.user_data:
doc[token_nr].is_sent_start = True
token_nr += len(sentence.words)
return doc
class TokenPreservingTokenizer(object):
"""Token perserving tokenizer.
This custom tokenizer simply preserves the tokenization that was already
performed during sentence splitting. It will output a list of space
separated tokens, whereas each token is a single word from the list of
sentences.
"""
def __init__(self, vocab: Vocab) -> None:
"""Initialize a custom tokenizer.
:param vocab: The vocab attribute of the respective spacy language object.
"""
self.vocab = vocab
def __call__(self, tokenized_sentences: List[Sentence]) -> Doc:
"""Apply the custom tokenizer.
:param tokenized_sentences: A list of sentences that was previously
tokenized/split by spacy
:return: Doc (a container for accessing linguistic annotations).
"""
all_input_tokens: List[str] = []
all_spaces: List[bool] = []
for sentence in tokenized_sentences:
words_in_sentence = sentence.words
if len(words_in_sentence) > 0:
all_input_tokens += sentence.words
current_sentence_pos = 0
spaces_list = [True] * len(words_in_sentence)
# Last word in sentence always assumed to be followed by space
for i, word in enumerate(words_in_sentence[:-1]):
current_sentence_pos = sentence.text.find(
word, current_sentence_pos
)
if current_sentence_pos == -1:
raise AttributeError(
"Could not find token in its parent sentence"
)
current_sentence_pos += len(word)
if not any(
sentence.text[current_sentence_pos:].startswith(s)
for s in whitespace
):
spaces_list[i] = False
all_spaces += spaces_list
return Doc(self.vocab, words=all_input_tokens, spaces=all_spaces)
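# Usage sketch (illustrative, not part of the original source). Tokenization
# and sentence splitting happen first; NLP enrichment (lemmas, POS/NER tags,
# dependencies) is applied afterwards to already-constructed Sentence objects:
#
#   lingual_parser = SpacyParser("en")
#   if lingual_parser.has_tokenizer_support():
#       parts_list = list(lingual_parser.split_sentences("Fonduer parses documents."))
#   if lingual_parser.has_NLP_support():
#       enriched = list(lingual_parser.enrich_sentences_with_NLP(sentences))
#   # where ``sentences`` is a list of Sentence objects from one document.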
| fonduer-master | src/fonduer/parser/lingual_parser/spacy_parser.py |
"""Fonduer lingual parser."""
from typing import Collection, Iterable, Iterator
from fonduer.parser.models import Sentence
class LingualParser(object):
"""Lingual parser."""
def split_sentences(self, text: str) -> Iterable[dict]:
"""
Split input text into sentences.
:param text: text to be split
:return: A generator of dict that is used as `**kwargs` to instantiate
:class:`Sentence <fonduer.parser.models.Sentence>`.
"""
raise NotImplementedError()
def enrich_sentences_with_NLP(
self, sentences: Collection[Sentence]
) -> Iterator[Sentence]:
"""
Add NLP attributes like lemmas, pos_tags, etc. to sentences.
        :param sentences: an iterator of
:class:`Sentence <fonduer.parser.models.Sentence>`.
:return: a generator of :class:`Sentence <fonduer.parser.models.Sentence>`.
"""
raise NotImplementedError()
def has_NLP_support(self) -> bool:
"""
Return True when NLP is supported.
:return: True when NLP is supported.
"""
raise NotImplementedError()
def has_tokenizer_support(self) -> bool:
"""
Return True when a tokenizer is supported.
:return: True when a tokenizer is supported.
"""
raise NotImplementedError()
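# Sketch of a custom implementation (illustrative, not part of the original
# source): a concrete LingualParser must implement all four methods; parsers
# without NLP support can simply return False from has_NLP_support().
#
#   class WhitespaceOnlyParser(LingualParser):
#       def split_sentences(self, text):
#           # yield dicts usable as **kwargs for Sentence (only a minimal
#           # subset of keys is shown here)
#           for chunk in text.split("\n"):
#               yield {"text": chunk, "words": chunk.split()}
#       def enrich_sentences_with_NLP(self, sentences):
#           raise NotImplementedError()
#       def has_NLP_support(self):
#           return False
#       def has_tokenizer_support(self):
#           return True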
| fonduer-master | src/fonduer/parser/lingual_parser/lingual_parser.py |
"""Fonduer webpage context model."""
from sqlalchemy import Column, ForeignKey, Integer, String
from fonduer.parser.models.context import Context
class Webpage(Context):
"""A Webpage Context enhanced with additional metadata."""
__tablename__ = "webpage"
#: The unique id of the ``Webpage``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The name of a ``Webpage``.
name = Column(String, unique=False, nullable=True)
#: The URL of the ``Webpage``.
url = Column(String)
#: The host of the ``Webpage``.
host = Column(String)
#: The type of the ``Webpage``.
page_type = Column(String)
#: The raw content of the ``Webpage``.
raw_content = Column(String)
#: The timestamp of when the ``Webpage`` was crawled.
crawltime = Column(String)
all = Column(String)
# Polymorphism information for SQLAlchemy
__mapper_args__ = {"polymorphic_identity": "webpage"}
def __repr__(self) -> str:
"""Represent the context as a string."""
return f"Webpage(id: {self.name[:10]}..., url: {self.url[8:23]}...)"
| fonduer-master | src/fonduer/parser/models/webpage.py |
"""Fonduer figure context model."""
from sqlalchemy import Column, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.orm import backref, relationship
from fonduer.parser.models.context import Context
class Figure(Context):
"""A figure Context in a Document.
Used to represent figures in a document.
.. note:: As of v0.6.2, ``<img>`` and ``<figure>`` tags turn into ``Figure``.
"""
__tablename__ = "figure"
#: The unique id of the ``Figure``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The position of the ``Figure`` in the ``Document``.
position = Column(Integer, nullable=False)
#: The name of a ``Figure``.
name = Column(String, unique=False, nullable=True)
#: The id of the parent ``Document``.
document_id = Column(Integer, ForeignKey("document.id", ondelete="CASCADE"))
#: The parent ``Document``.
document = relationship(
"Document",
backref=backref("figures", order_by=position, cascade="all, delete-orphan"),
foreign_keys=document_id,
)
#: The id of the parent ``Section``.
section_id = Column(Integer, ForeignKey("section.id"))
#: The parent ``Section``.
section = relationship(
"Section",
backref=backref("figures", order_by=position, cascade="all, delete-orphan"),
foreign_keys=section_id,
)
#: The id of the parent ``Cell``, if any.
cell_id = Column(Integer, ForeignKey("cell.id"))
#: The the parent ``Cell``, if any.
cell = relationship(
"Cell",
backref=backref("figures", order_by=position, cascade="all, delete-orphan"),
foreign_keys=cell_id,
)
#: The ``Figure``'s URL.
url = Column(String)
__mapper_args__ = {"polymorphic_identity": "figure"}
__table_args__ = (UniqueConstraint(document_id, position),)
def __repr__(self) -> str:
"""Represent the context as a string."""
if self.cell:
return (
f"Figure("
f"Doc: {self.document.name}, "
f"Sec: {self.section.position}, "
f"Cell: {self.cell.position}, "
f"Pos: {self.position}, "
f"Url: {self.url}"
f")"
)
else:
return (
f"Figure("
f"Doc: {self.document.name}, "
f"Sec: {self.section.position}, "
f"Pos: {self.position}, "
f"Url: {self.url}"
f")"
)
def __gt__(self, other: "Figure") -> bool:
"""Check if the context is greater than another context."""
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
| fonduer-master | src/fonduer/parser/models/figure.py |
"""Fonduer's parser model module."""
from fonduer.parser.models.caption import Caption
from fonduer.parser.models.context import Context
from fonduer.parser.models.document import Document
from fonduer.parser.models.figure import Figure
from fonduer.parser.models.paragraph import Paragraph
from fonduer.parser.models.section import Section
from fonduer.parser.models.sentence import Sentence
from fonduer.parser.models.table import Cell, Table
from fonduer.parser.models.webpage import Webpage
__all__ = [
"Caption",
"Cell",
"Context",
"Document",
"Figure",
"Paragraph",
"Section",
"Sentence",
"Table",
"Webpage",
]
| fonduer-master | src/fonduer/parser/models/__init__.py |
"""Fonduer section context model."""
from sqlalchemy import Column, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.orm import backref, relationship
from fonduer.parser.models.context import Context
class Section(Context):
"""A Section Context in a Document.
.. note:: As of v0.6.2, each document simply has a single Section.
Specifically, ``<html>`` and ``<section>`` tags turn into ``Section``.
Future parsing improvements can add better section recognition, such as the
sections of an academic paper.
"""
__tablename__ = "section"
#: The unique id of the ``Section``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The name of a ``Section``.
name = Column(String, unique=False, nullable=True)
#: The position of the ``Section`` in a ``Document``.
position = Column(Integer, nullable=False)
#: The id of the parent ``Document``.
document_id = Column(Integer, ForeignKey("document.id", ondelete="CASCADE"))
#: The parent ``Document``.
document = relationship(
"Document",
backref=backref("sections", order_by=position, cascade="all, delete-orphan"),
foreign_keys=document_id,
)
__mapper_args__ = {"polymorphic_identity": "section"}
__table_args__ = (UniqueConstraint(document_id, position),)
def __repr__(self) -> str:
"""Represent the context as a string."""
return f"Section(Doc: {self.document.name}, Pos: {self.position})"
def __gt__(self, other: "Section") -> bool:
"""Check if the context is greater than another context."""
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
| fonduer-master | src/fonduer/parser/models/section.py |
"""Fonduer caption context model."""
from sqlalchemy import Column, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.orm import backref, relationship
from fonduer.parser.models.context import Context
class Caption(Context):
"""A Caption Context in a Document.
Used to represent figure or table captions in a document.
.. note:: As of v0.6.2, ``<caption>`` and ``<figcaption>`` tags turn into
``Caption``.
"""
__tablename__ = "caption"
#: The unique id the ``Caption``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The position of the ``Caption`` in the ``Document``.
position = Column(Integer, nullable=False)
#: The name of a ``Caption``.
name = Column(String, unique=False, nullable=True)
#: The id of the parent ``Document``.
document_id = Column(Integer, ForeignKey("document.id"))
#: The parent ``Document``.
document = relationship(
"Document",
backref=backref("captions", order_by=position, cascade="all, delete-orphan"),
foreign_keys=document_id,
)
#: The id of the parent ``Table``, if any.
table_id = Column(Integer, ForeignKey("table.id"))
#: The parent ``Table``, if any.
table = relationship(
"Table",
backref=backref("captions", order_by=position, cascade="all, delete-orphan"),
foreign_keys=table_id,
)
#: The id of the parent ``Figure``, if any.
figure_id = Column(Integer, ForeignKey("figure.id"))
#: The parent ``Figure``, if any.
figure = relationship(
"Figure",
backref=backref("captions", order_by=position, cascade="all, delete-orphan"),
foreign_keys=figure_id,
)
__mapper_args__ = {"polymorphic_identity": "caption"}
__table_args__ = (UniqueConstraint(document_id, table_id, figure_id, position),)
def __repr__(self) -> str:
"""Represent the context as a string."""
if self.figure:
return (
f"Caption("
f"Doc: {self.document.name}, "
f"Figure: {self.figure.position}, "
f"Pos: {self.position}"
f")"
)
elif self.table:
return (
f"Caption("
f"Doc: {self.document.name}, "
f"Table: {self.table.position}, "
f"Pos: {self.position}"
f")"
)
else:
raise NotImplementedError(
"Caption must be associated with Figure or Table."
)
def __gt__(self, other: "Caption") -> bool:
"""Check if the context is greater than another context."""
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
| fonduer-master | src/fonduer/parser/models/caption.py |
"""Fonduer sentence context model."""
from builtins import object
from typing import Any, Dict
from sqlalchemy import Column, ForeignKey, Integer, String, Text, UniqueConstraint
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import backref, relationship
from fonduer.parser.models.context import Context
from fonduer.utils.utils_visual import Bbox
INT_ARRAY_TYPE = postgresql.ARRAY(Integer)
STR_ARRAY_TYPE = postgresql.ARRAY(String)
class SentenceMixin(object):
"""A sentence Context in a Document."""
def is_lingual(self) -> bool:
"""
Return True when lingual information is available.
:return: True when lingual information is available.
"""
return False
def is_visual(self) -> bool:
"""
Return True when visual information is available.
:return: True when visual information is available.
"""
return False
def is_tabular(self) -> bool:
"""
Return True when tabular information is available.
:return: True when tabular information is available.
"""
return False
def is_structural(self) -> bool:
"""
Return True when structural information is available.
:return: True when structural information is available.
"""
return False
class LingualMixin(object):
"""A collection of lingual attributes."""
@declared_attr
def lemmas(cls) -> Column:
"""List of the lemmas for each word in a ``Sentence``."""
return Column(STR_ARRAY_TYPE)
@declared_attr
def pos_tags(cls) -> Column:
"""List of POS tags for each word in a ``Sentence``."""
return Column(STR_ARRAY_TYPE)
@declared_attr
def ner_tags(cls) -> Column:
"""List of NER tags for each word in a ``Sentence``."""
return Column(STR_ARRAY_TYPE)
@declared_attr
def dep_parents(cls) -> Column:
"""List of the dependency parents for each word in a ``Sentence``."""
return Column(INT_ARRAY_TYPE)
@declared_attr
def dep_labels(cls) -> Column:
"""List of dependency labels for each word in a ``Sentence``."""
return Column(STR_ARRAY_TYPE)
def is_lingual(self) -> bool:
"""Whether or not the ``Sentence`` contains NLP information."""
return self.lemmas is not None
class TabularMixin(object):
"""A collection of tabular attributes."""
@declared_attr
def table_id(cls) -> Column:
"""Id of the parent ``Table``, if any."""
return Column("table_id", ForeignKey("table.id"))
@declared_attr
def table(cls) -> relationship:
"""Parent ``Table``, if any."""
return relationship(
"Table",
backref=backref("sentences", cascade="all, delete-orphan"),
foreign_keys=lambda: cls.table_id,
)
@declared_attr
def cell_id(cls) -> Column:
"""Id of the parent ``Cell``, if any."""
return Column("cell_id", ForeignKey("cell.id"))
@declared_attr
def cell(cls) -> relationship:
"""Parent ``Cell``, if any."""
return relationship(
"Cell",
backref=backref("sentences", cascade="all, delete-orphan"),
foreign_keys=lambda: cls.cell_id,
)
@declared_attr
def row_start(cls) -> Column:
"""``row_start`` of the parent ``Cell``, if any."""
return Column(Integer)
@declared_attr
def row_end(cls) -> Column:
"""``row_end`` of the parent ``Cell``, if any."""
return Column(Integer)
@declared_attr
def col_start(cls) -> Column:
"""``col_start`` of the parent ``Cell``, if any."""
return Column(Integer)
@declared_attr
def col_end(cls) -> Column:
"""``col_end`` of the parent ``Cell``, if any."""
return Column(Integer)
def is_tabular(self) -> bool:
"""Whether or not the ``Sentence`` contains tabular information."""
return self.table is not None
def is_cellular(self) -> bool:
"""Whether or not the ``Sentence`` contains information about its table cell."""
return self.cell is not None
class VisualMixin(object):
"""A collection of visual attributes."""
@declared_attr
def page(cls) -> Column:
"""List of the page index of each word in the ``Sentence``.
Page indexes start at 1.
"""
return Column(INT_ARRAY_TYPE)
@declared_attr
def top(cls) -> Column:
"""List of each word's TOP bounding box coordinate in the ``Sentence``."""
return Column(INT_ARRAY_TYPE)
@declared_attr
def left(cls) -> Column:
"""List of each word's LEFT bounding box coordinate in the ``Sentence``."""
return Column(INT_ARRAY_TYPE)
@declared_attr
def bottom(cls) -> Column:
"""List of each word's BOTTOM bounding box coordinate in the ``Sentence``."""
return Column(INT_ARRAY_TYPE)
@declared_attr
def right(cls) -> Column:
"""List of each word's RIGHT bounding box coordinate in the ``Sentence``."""
return Column(INT_ARRAY_TYPE)
def is_visual(self) -> bool:
"""Whether or not the ``Sentence`` contains visual information."""
return self.page is not None and self.page[0] is not None
def get_bbox(self) -> Bbox:
"""Get the bounding box."""
# TODO: this may have issues where a sentence is linked to words on different
# pages
if self.is_visual():
return Bbox(
self.page[0],
min(self.top),
max(self.bottom),
min(self.left),
max(self.right),
)
else:
return None
class StructuralMixin(object):
"""A collection of structural attributes."""
@declared_attr
def xpath(cls) -> Column:
"""HTML XPATH to the ``Sentence``."""
return Column(String)
@declared_attr
def html_tag(cls) -> Column:
"""HTML tag of the element containing the ``Sentence``."""
return Column(String)
#: The HTML attributes of the element the ``Sentence`` is found in.
@declared_attr
def html_attrs(cls) -> Column:
"""List of the html attributes of the element containing the ``Sentence``."""
return Column(STR_ARRAY_TYPE)
def is_structural(self) -> bool:
"""Whether or not the ``Sentence`` contains structural information."""
return self.html_tag is not None
# SentenceMixin must come last in arguments to not overwrite is_* methods
# class Sentence(Context, StructuralMixin, SentenceMixin): # Memex variant
class Sentence(
Context, TabularMixin, LingualMixin, VisualMixin, StructuralMixin, SentenceMixin
):
"""A Sentence subclass with Lingual, Tabular, Visual, and HTML attributes.
.. note:: Unlike other data models, there is no HTML element corresponding to
``Sentence``. One ``Paragraph`` comprises one or more of ``Sentence``, but how a
``Paragraph`` is split depends on which NLP parser (e.g., spaCy) is used.
"""
__tablename__ = "sentence"
#: The unique id for the ``Sentence``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The position of the ``Sentence`` in the ``Document``.
position = Column(Integer, nullable=False) # unique sentence number per document
#: The name of a ``Sentence``.
name = Column(String, unique=False, nullable=True)
#: The id of the parent ``Document``.
document_id = Column(Integer, ForeignKey("document.id"))
#: The the parent ``Document``.
document = relationship(
"Document",
backref=backref("sentences", cascade="all, delete-orphan"),
foreign_keys=document_id,
)
#: The id of the parent ``Section``.
section_id = Column(Integer, ForeignKey("section.id"))
#: The parent ``Section``.
section = relationship(
"Section",
backref=backref("sentences", cascade="all, delete-orphan"),
foreign_keys=section_id,
)
#: The id of the parent ``Paragraph``.
paragraph_id = Column(Integer, ForeignKey("paragraph.id"))
#: The parent ``Paragraph``.
paragraph = relationship(
"Paragraph",
backref=backref("sentences", cascade="all, delete-orphan"),
foreign_keys=paragraph_id,
)
#: The full text of the ``Sentence``.
text = Column(Text, nullable=False)
#: A list of the words in a ``Sentence``.
words = Column(STR_ARRAY_TYPE)
#: A list of the character offsets of each word in a ``Sentence``, with
#: respect to the start of the sentence.
char_offsets = Column(INT_ARRAY_TYPE)
#: A list of the character offsets of each word in a ``Sentence``, with
#: respect to the entire document.
abs_char_offsets = Column(INT_ARRAY_TYPE)
__mapper_args__ = {"polymorphic_identity": "sentence"}
__table_args__ = (UniqueConstraint(document_id, position),)
def __repr__(self) -> str:
"""Represent the context as a string."""
if self.is_tabular():
rows = (
tuple([self.row_start, self.row_end])
if self.row_start != self.row_end
else self.row_start
)
cols = (
tuple([self.col_start, self.col_end])
if self.col_start != self.col_end
else self.col_start
)
return (
f"Sentence ("
f"Doc: '{self.document.name}', "
f"Table: {self.table.position}, "
f"Row: {rows}, "
f"Col: {cols}, "
f"Index: {self.position}, "
f"Text: '{self.text}'"
f")"
)
else:
return (
f"Sentence ("
f"Doc: '{self.document.name}', "
f"Sec: {self.section.position}, "
f"Par: {self.paragraph.position}, "
f"Idx: {self.position}, "
f"Text: '{self.text}'"
f")"
)
def _asdict(self) -> Dict[str, Any]:
return {
# base
"id": self.id,
# 'document': self.document,
"position": self.position,
"text": self.text,
# tabular
# 'table': self.table,
# 'cell': self.cell,
"row_start": self.row_start,
"row_end": self.row_end,
"col_start": self.col_start,
"col_end": self.col_end,
# lingual
"words": self.words,
"char_offsets": self.char_offsets,
"lemmas": self.lemmas,
"pos_tags": self.pos_tags,
"ner_tags": self.ner_tags,
"dep_parents": self.dep_parents,
"dep_labels": self.dep_labels,
# visual
"page": self.page,
"top": self.top,
"bottom": self.bottom,
"left": self.left,
"right": self.right,
}
def __gt__(self, other: "Sentence") -> bool:
"""Check if the context is greater than another context."""
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
| fonduer-master | src/fonduer/parser/models/sentence.py |
"""Fonduer context model."""
from sqlalchemy import Column, Integer, String
from fonduer.meta import Meta
class Context(Meta.Base):
"""A piece of content from which Candidates are composed.
This serves as the base class of the Fonduer document model.
"""
__tablename__ = "context"
#: The unique id of the ``Context``.
id = Column(Integer, primary_key=True)
#: The type of the ``Context`` represented as a string (e.g. "sentence",
#: "paragraph", "figure").
type = Column(String, nullable=False)
#: A stable representation of the ``Context`` that will not change between
#: runs.
stable_id = Column(String, unique=True, nullable=False)
__mapper_args__ = {"polymorphic_identity": "context", "polymorphic_on": type}
| fonduer-master | src/fonduer/parser/models/context.py |
"""Utilities for constructing and splitting stable ids."""
from typing import List, Tuple
from fonduer.parser.models import Context
def construct_stable_id(
parent_context: Context,
polymorphic_type: str,
relative_char_offset_start: int,
relative_char_offset_end: int,
) -> str:
"""Construct Context's stable ID.
Construct a stable ID for a Context given its parent and its character
offsets relative to the parent.
"""
doc_id, type, idx = split_stable_id(parent_context.stable_id)
if polymorphic_type in [
"section_mention",
"figure_mention",
"table_mention",
"paragraph_mention",
"caption_mention",
]:
parent_doc_start = idx[0]
return f"{doc_id}::{polymorphic_type}:{parent_doc_start}"
elif polymorphic_type in ["cell_mention"]:
cell_pos = idx[0]
cell_row_start = idx[1]
cell_col_start = idx[2]
return (
f"{doc_id}::{polymorphic_type}:{cell_pos}:{cell_row_start}:{cell_col_start}"
)
elif polymorphic_type in ["sentence", "document_mention", "span_mention"]:
parent_doc_char_start = idx[0]
start = parent_doc_char_start + relative_char_offset_start
end = parent_doc_char_start + relative_char_offset_end
return f"{doc_id}::{polymorphic_type}:{start}:{end}"
raise ValueError(f"Unrecognized context type:\t{polymorphic_type}")
def split_stable_id(
stable_id: str,
) -> Tuple[str, str, List[int]]:
"""Split stable ID.
    Analyze the stable ID and return the following information:
* Document (root) stable ID
* Context polymorphic type
* Character offset start, end *relative to document start*
    Returns a tuple of three values.
"""
split1 = stable_id.split("::")
if len(split1) == 2:
split2 = split1[1].split(":")
type = split2[0]
idx = [int(_) for _ in split2[1:]]
return split1[0], type, idx
raise ValueError(f"Malformed stable_id:\t{stable_id}")
| fonduer-master | src/fonduer/parser/models/utils.py |
"""Fonduer document context model."""
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.types import PickleType
from fonduer.parser.models.context import Context
class Document(Context):
"""A document Context.
Represents all the information of a particular document.
What becomes a document depends on which child class of ``DocPreprocessor`` is used.
.. note:: As of v0.6.2, each file is one document when ``HTMLDocPreprocessor`` or
        ``TextDocPreprocessor`` is used, while each line in the input file is
        treated as one document when ``CSVDocPreprocessor`` or
        ``TSVDocPreprocessor`` is used.
"""
__tablename__ = "document"
#: The unique id of a ``Document``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The filename of a ``Document``, without its extension (e.g., "BC818").
name = Column(String, unique=True, nullable=False)
#: The full text of the ``Document``.
text = Column(String)
    #: Pickled metadata about a document extracted from a document preprocessor.
meta = Column(PickleType)
__mapper_args__ = {"polymorphic_identity": "document"}
def __repr__(self) -> str:
"""Represent the context as a string."""
return f"Document {self.name}"
def __gt__(self, other: "Document") -> bool:
"""Check if the context is greater than another context."""
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
| fonduer-master | src/fonduer/parser/models/document.py |
"""Fonduer paragraph context model."""
from sqlalchemy import Column, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.orm import backref, relationship
from fonduer.parser.models.context import Context
class Paragraph(Context):
"""A paragraph Context in a Document.
Represents a grouping of adjacent sentences.
.. note:: As of v0.6.2, a text content in two properties ``.text`` and ``.tail``
turn into ``Paragraph``.
See https://lxml.de/tutorial.html#elements-contain-text for details about
``.text`` and ``.tail`` properties.
"""
__tablename__ = "paragraph"
#: The unique id of the ``Paragraph``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The position of the ``Paragraph`` in the ``Document``.
position = Column(Integer, nullable=False)
#: The name of a ``Paragraph``.
name = Column(String, unique=False, nullable=True)
#: The id of the parent ``Document``.
document_id = Column(Integer, ForeignKey("document.id"))
#: The parent ``Document``.
document = relationship(
"Document",
backref=backref("paragraphs", order_by=position, cascade="all, delete-orphan"),
foreign_keys=document_id,
)
#: The id of the parent ``Section``.
section_id = Column(Integer, ForeignKey("section.id"))
#: The parent ``Section``.
section = relationship(
"Section",
backref=backref("paragraphs", cascade="all, delete-orphan"),
foreign_keys=section_id,
)
#: The id of the parent ``Cell``, if any.
cell_id = Column(Integer, ForeignKey("cell.id"))
#: The parent ``Cell``, if any.
cell = relationship(
"Cell",
backref=backref("paragraphs", cascade="all, delete-orphan"),
foreign_keys=cell_id,
)
#: The id of the parent ``Caption``, if any.
caption_id = Column(Integer, ForeignKey("caption.id"))
#: The parent ``Caption``, if any.
caption = relationship(
"Caption",
backref=backref("paragraphs", cascade="all, delete-orphan"),
foreign_keys=caption_id,
)
__mapper_args__ = {"polymorphic_identity": "paragraph"}
__table_args__ = (UniqueConstraint(document_id, position),)
def __repr__(self) -> str:
"""Represent the context as a string."""
if self.cell:
return (
f"Paragraph("
f"Doc: {self.document.name}, "
f"Sec: {self.section.position}, "
f"Cell: {self.cell.position}, "
f"Pos: {self.position}"
f")"
)
elif self.caption:
return (
f"Paragraph("
f"Doc: {self.document.name}, "
f"Sec: {self.section.position}, "
f"Caption: {self.caption.position}, "
f"Pos: {self.position}"
")"
)
else:
return (
f"Paragraph("
f"Doc: {self.document.name}, "
f"Sec: {self.section.position}, "
f"Pos: {self.position}"
f")"
)
def __gt__(self, other: "Paragraph") -> bool:
"""Check if the context is greater than another context."""
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
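# Hedged usage sketch (not part of the module above): thanks to the backrefs
# declared on ``Paragraph``, the paragraphs of a loaded ``Document`` are
# available as an ordered collection. The session and document name are
# assumptions for illustration only.
def print_paragraphs(session, doc_name):
    from fonduer.parser.models import Document
    doc = session.query(Document).filter(Document.name == doc_name).one()
    # "paragraphs" is the backref defined above, ordered by Paragraph.position.
    for para in doc.paragraphs:
        print(repr(para))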
| fonduer-master | src/fonduer/parser/models/paragraph.py |
"""Fonduer table context model."""
from sqlalchemy import Column, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.orm import backref, relationship
from fonduer.parser.models.context import Context
class Table(Context):
"""A Table Context in a Document.
Used to represent tables found in a document.
.. note:: As of v0.6.2, ``<table>`` tags turn into ``Table``.
"""
__tablename__ = "table"
#: The unique id of the ``Table``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The position of the ``Table`` in the ``Document``.
position = Column(Integer, nullable=False)
#: The name of a ``Table``.
name = Column(String, unique=False, nullable=True)
#: The id of the parent ``Document``.
document_id = Column(Integer, ForeignKey("document.id"))
#: The parent ``Document``.
document = relationship(
"Document",
backref=backref("tables", order_by=position, cascade="all, delete-orphan"),
foreign_keys=document_id,
)
#: The id of the parent ``Section``.
section_id = Column(Integer, ForeignKey("section.id"))
#: The parent ``Section``.
section = relationship(
"Section",
backref=backref("tables", order_by=position, cascade="all, delete-orphan"),
foreign_keys=section_id,
)
__mapper_args__ = {"polymorphic_identity": "table"}
__table_args__ = (UniqueConstraint(document_id, position),)
def __repr__(self) -> str:
"""Represent the context as a string."""
return (
f"Table("
f"Doc: {self.document.name}, "
f"Sec: {self.section.position}, "
f"Position: {self.position}"
f")"
)
def __gt__(self, other: "Table") -> bool:
"""Check if the context is greater than another context."""
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
class Cell(Context):
"""A cell Context in a Document.
Used to represent the cells that comprise a table in a document.
.. note:: As of v0.6.2, ``<th>`` and ``<td>`` tags turn into ``Cell``.
"""
__tablename__ = "cell"
#: The unique id of the ``Cell``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The position of the ``Cell`` in the ``Table``.
position = Column(Integer, nullable=False)
#: The name of a ``Cell``.
name = Column(String, unique=False, nullable=True)
#: The id of the parent ``Table``.
table_id = Column(Integer, ForeignKey("table.id"))
#: The parent ``Table``.
table = relationship(
"Table",
backref=backref("cells", order_by=position, cascade="all, delete-orphan"),
foreign_keys=table_id,
)
#: The id of the parent ``Document``.
document_id = Column(Integer, ForeignKey("document.id"))
#: The parent ``Document``.
document = relationship(
"Document",
backref=backref("cells", order_by=position, cascade="all, delete-orphan"),
foreign_keys=document_id,
)
#: The start index of the row in the ``Table`` the ``Cell`` is in.
row_start = Column(Integer)
#: The end index of the row in the ``Table`` the ``Cell`` is in.
row_end = Column(Integer)
#: The start index of the column in the ``Table`` the ``Cell`` is in.
col_start = Column(Integer)
#: The end index of the column in the ``Table`` the ``Cell`` is in.
col_end = Column(Integer)
__mapper_args__ = {"polymorphic_identity": "cell"}
__table_args__ = (UniqueConstraint(document_id, table_id, position),)
def __repr__(self) -> str:
"""Represent the context as a string."""
return (
f"Cell(Doc: {self.document.name}, "
f"Table: {self.table.position}, "
f"Row: {tuple({self.row_start, self.row_end})}, "
f"Col: {tuple({self.col_start, self.col_end})}, "
f"Pos: {self.position})"
)
def __gt__(self, other: "Cell") -> bool:
"""Check if the context is greater than another context."""
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
| fonduer-master | src/fonduer/parser/models/table.py |
"""Fonduer visual parser that parses visual information from hOCR."""
import itertools
import re
from typing import Dict, Iterable, Iterator, List, Pattern, Tuple, Union
import spacy
import spacy.gold
from packaging import version
from spacy.gold import align
from fonduer.parser.models import Sentence
from fonduer.parser.visual_parser.visual_parser import VisualParser
class HocrVisualParser(VisualParser):
"""Visual Parser for hOCR."""
def __init__(
self,
replacements: List[Tuple[str, str]] = [
("[\u2010\u2011\u2012\u2013\u2014\u2212]", "-")
],
):
"""Initialize a visual parser.
:raises ImportError: an error is raised when spaCy is not 2.3.0 or later.
"""
if version.parse(spacy.__version__) < version.parse("2.3.0"):
raise ImportError(
f"You are using spaCy {spacy.__version__}, "
f"but it should be 2.3.0 or later to use HocrVisualParser."
)
self.replacements: List[Tuple[Pattern, str]] = []
for (pattern, replace) in replacements:
self.replacements.append((re.compile(pattern, flags=re.UNICODE), replace))
def parse(
self, document_name: str, sentences: Iterable[Sentence]
) -> Iterator[Sentence]:
"""Parse visual information embedded in sentence's html_attrs.
:param document_name: the document name.
:param sentences: sentences to be linked with visual information.
:return: A generator of ``Sentence``.
"""
def attrib_parse(
html_attrs: List[str],
) -> Dict[str, Union[List[int], List[str]]]:
ret: Dict[str, Union[List[int], List[str]]] = {}
for attr in html_attrs:
                key, values = attr.split("=", 1)  # split only at the first occurrence
if key in ["left", "top", "right", "bottom", "ppageno"]:
ret[key] = [int(x) for x in values.split()]
elif key == "tokens":
# Run RegEx replacements
for (rgx, replace) in self.replacements:
values = rgx.sub(replace, values)
ret[key] = values.split()
return ret
for _, group in itertools.groupby(sentences, key=lambda x: x.xpath):
sents = list(group)
# Get bbox from document
attribs = attrib_parse(sents[0].html_attrs)
lefts = attribs["left"]
tops = attribs["top"]
rights = attribs["right"]
bottoms = attribs["bottom"]
ppagenos = attribs["ppageno"]
# Clear the hocr specific html_attrs
for sent in sents:
for attr in sent.html_attrs[:]:
key, values = attr.split(
"=", 1
                    )  # split only at the first occurrence
if key in [
"left",
"top",
"right",
"bottom",
"ppageno",
"tokens",
"x_wconf",
]:
sent.html_attrs.remove(attr)
# Get a list of all tokens represented by ocrx_word in hOCR
hocr_tokens = attribs["tokens"]
# Get a list of all tokens tokenized by spaCy.
spacy_tokens = [word for sent in sents for word in sent.words]
# gold.align assumes that both tokenizations add up to the same string.
cost, h2s, s2h, h2s_multi, s2h_multi = align(hocr_tokens, spacy_tokens)
ptr = 0 # word pointer
for sent in sents:
sent.left = []
sent.top = []
sent.right = []
sent.bottom = []
sent.page = []
for i, word in enumerate(sent.words):
# One-to-one mapping is NOT available
if s2h[ptr + i] == -1:
if ptr + i in s2h_multi: # One spacy token-to-multi hOCR words
left = lefts[s2h_multi[ptr + i]]
top = tops[s2h_multi[ptr + i]]
right = rights[s2h_multi[ptr + i]]
bottom = bottoms[s2h_multi[ptr + i]]
ppageno = ppagenos[s2h_multi[ptr + i]]
else:
h2s_multi_idx = [
k for k, v in h2s_multi.items() if ptr + i == v
]
start, end = 0, 0
if h2s_multi_idx: # One hOCR word-to-multi spacy tokens
start = h2s_multi_idx[0]
end = h2s_multi_idx[-1] + 1
else:
start = s2h_multi[i - 1 if i > 0 else 0]
end = s2h_multi[i + 1] + 1
# calculate a bbox that can include all
left = min(lefts[start:end])
top = min(tops[start:end])
right = max(rights[start:end])
bottom = max(bottoms[start:end])
ppageno = ppagenos[start]
# One-to-one mapping is available
else:
left = lefts[s2h[ptr + i]]
top = tops[s2h[ptr + i]]
right = rights[s2h[ptr + i]]
bottom = bottoms[s2h[ptr + i]]
ppageno = ppagenos[s2h[ptr + i]]
sent.left.append(left)
sent.top.append(top)
sent.right.append(right)
sent.bottom.append(bottom)
sent.page.append(ppageno + 1) # 1-based in Fonduer
ptr += len(sent.words)
yield sent
def is_parsable(self, document_name: str) -> bool:
"""Whether visual information can be parsed. Currently always return True.
:param document_name: the document name.
"""
return True
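# Hedged usage sketch (illustration only, not part of the module above): the
# parser is applied per document to sentences whose ``html_attrs`` still carry
# the hOCR attributes; "my_doc" and ``sentences`` are placeholder assumptions.
def attach_hocr_coordinates(sentences, document_name="my_doc"):
    visual_parser = HocrVisualParser()
    # parse() yields the same Sentence objects with their left/top/right/bottom
    # and page lists filled in from the hOCR bounding boxes.
    return list(visual_parser.parse(document_name, sentences))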
| fonduer-master | src/fonduer/parser/visual_parser/hocr_visual_parser.py |
"""Fonduer's visual parser module."""
from fonduer.parser.visual_parser.hocr_visual_parser import HocrVisualParser
from fonduer.parser.visual_parser.pdf_visual_parser import PdfVisualParser
from fonduer.parser.visual_parser.visual_parser import VisualParser
__all__ = ["VisualParser", "PdfVisualParser", "HocrVisualParser"]
| fonduer-master | src/fonduer/parser/visual_parser/__init__.py |
"""Fonduer visual parser that parses visual information from PDF."""
import logging
import os
import re
import shutil
import subprocess
from builtins import range, zip
from collections import OrderedDict, defaultdict
from operator import attrgetter
from typing import DefaultDict, Dict, Iterable, Iterator, List, Optional, Tuple
import numpy as np
from bs4 import BeautifulSoup
from bs4.element import Tag
from editdistance import eval as editdist # Alternative library: python-levenshtein
from fonduer.parser.models import Sentence
from fonduer.parser.visual_parser.visual_parser import VisualParser
from fonduer.utils.utils_visual import Bbox
logger = logging.getLogger(__name__)
# Define a type alias for readability
# PdfWordId is an ID for a word that is unique within a PDF file.
# The two elements represent (page_num, i), where page_num is 1-based page number and
# i is a 0-based unique ID for a word within that page.
PdfWordId = Tuple[int, int]
# PdfWord is a type alias for (PdfWordId, str), where the second element is a string
# representation of the word.
PdfWord = Tuple[PdfWordId, str]
# Similarly, HtmlWordId is an ID for a word that is unique within an HTML file.
# The two elements represent (sentence.stable_id, i), where i is an ID within a
# Sentence.
HtmlWordId = Tuple[str, int]
# Similar to PdfWord, HtmlWord is a type alias for (HtmlWordId, str), where the second
# element is a string representation of the word.
HtmlWord = Tuple[HtmlWordId, str]
class PdfVisualParser(VisualParser):
"""Link visual information, extracted from PDF, with parsed sentences.
This linker assumes the following conditions for expected results:
- The PDF file exists in a directory specified by `pdf_path`.
- The basename of the PDF file is same as the *document name*
and its extension is either ".pdf" or ".PDF".
- A PDF has a text layer.
"""
def __init__(self, pdf_path: str, verbose: bool = False) -> None:
"""Initialize VisualParser.
:param pdf_path: a path to directory that contains PDF files.
:param verbose: whether to turn on verbose logging.
"""
if not os.path.isdir(pdf_path):
raise ValueError(f"No directory exists at {pdf_path}!")
self.pdf_path = pdf_path
self.pdf_file: Optional[str] = None
self.verbose = verbose
self.coordinate_map: Optional[Dict[PdfWordId, Bbox]] = None
self.pdf_word_list: Optional[List[PdfWord]] = None
self.html_word_list: Optional[List[HtmlWord]] = None
self.links: Optional[OrderedDict[HtmlWordId, PdfWordId]] = None
self.pdf_dim: Optional[Tuple[int, int]] = None
delimiters = (
r"([\(\)\,\?\u2212\u201C\u201D\u2018\u2019\u00B0\*']|(?<!http):|\.$|\.\.\.)"
)
self.separators = re.compile(delimiters)
# Check if poppler-utils is installed AND the version is 0.36.0 or above
if shutil.which("pdfinfo") is None or shutil.which("pdftotext") is None:
raise RuntimeError("poppler-utils is not installed or they are not in PATH")
version = subprocess.check_output(
"pdfinfo -v", shell=True, stderr=subprocess.STDOUT, universal_newlines=True
)
m = re.search(r"\d{1,2}\.\d{2}\.\d", version)
if int(m.group(0).replace(".", "")) < 360:
raise RuntimeError(
f"Installed poppler-utils's version is {m.group(0)}, "
f"but should be 0.36.0 or above"
)
def parse(
self, document_name: str, sentences: Iterable[Sentence]
) -> Iterator[Sentence]:
"""Link visual information with sentences.
:param document_name: the document name.
:param sentences: sentences to be linked with visual information.
:return: A generator of ``Sentence``.
"""
# sentences should be sorted as their order is not deterministic.
self.sentences = sorted(sentences, key=attrgetter("position"))
self.pdf_file = self._get_linked_pdf_path(document_name)
try:
self._extract_pdf_words()
except RuntimeError as e:
logger.exception(e)
return
self._extract_html_words()
self._link_lists(search_max=200)
for sentence in self._update_coordinates():
yield sentence
def _extract_pdf_words(self) -> None:
logger.debug(f"pdfinfo '{self.pdf_file}' | grep -a ^Pages: | sed 's/[^0-9]*//'")
num_pages = subprocess.check_output(
f"pdfinfo '{self.pdf_file}' | grep -a ^Pages: | sed 's/[^0-9]*//'",
shell=True,
)
pdf_word_list: List[PdfWord] = []
coordinate_map: Dict[PdfWordId, Bbox] = {}
for i in range(1, int(num_pages) + 1):
logger.debug(f"pdftotext -f {i} -l {i} -bbox-layout '{self.pdf_file}' -")
html_content = subprocess.check_output(
f"pdftotext -f {i} -l {i} -bbox-layout '{self.pdf_file}' -", shell=True
)
soup = BeautifulSoup(html_content, "html.parser")
pages = soup.find_all("page")
pdf_word_list_i, coordinate_map_i = self._coordinates_from_HTML(pages[0], i)
pdf_word_list += pdf_word_list_i
# update coordinate map
coordinate_map.update(coordinate_map_i)
self.pdf_word_list = pdf_word_list
self.coordinate_map = coordinate_map
if len(self.pdf_word_list) == 0:
raise RuntimeError(
f"Words could not be extracted from PDF: {self.pdf_file}"
)
# take last page dimensions
page_width, page_height = (
int(float(pages[0].get("width"))),
int(float(pages[0].get("height"))),
)
self.pdf_dim = (page_width, page_height)
if self.verbose:
logger.info(f"Extracted {len(self.pdf_word_list)} pdf words")
    def _get_linked_pdf_path(self, document_name: str) -> Optional[str]:
"""Get the pdf file path, return None if it doesn't exist.
:param document_name: a document name.
"""
full_path = os.path.join(self.pdf_path, document_name + ".pdf")
if os.path.isfile(full_path):
return full_path
full_path = os.path.join(self.pdf_path, document_name + ".PDF")
if os.path.isfile(full_path):
return full_path
return None
def is_parsable(self, document_name: str) -> bool:
"""Verify that the file exists and has a PDF extension.
:param document_name: The path to the PDF document.
"""
return False if self._get_linked_pdf_path(document_name) is None else True
def _coordinates_from_HTML(
self, page: Tag, page_num: int
) -> Tuple[List[PdfWord], Dict[PdfWordId, Bbox]]:
pdf_word_list: List[PdfWord] = []
coordinate_map: Dict[PdfWordId, Bbox] = {}
block_coordinates: Dict[PdfWordId, Tuple[int, int]] = {}
blocks = page.find_all("block")
i = 0 # counter for word_id in page_num
for block in blocks:
x_min_block = int(float(block.get("xmin")))
y_min_block = int(float(block.get("ymin")))
lines = block.find_all("line")
for line in lines:
y_min_line = int(float(line.get("ymin")))
y_max_line = int(float(line.get("ymax")))
words = line.find_all("word")
for word in words:
xmin = int(float(word.get("xmin")))
xmax = int(float(word.get("xmax")))
for content in self.separators.split(word.getText()):
if len(content) > 0: # Ignore empty characters
word_id: PdfWordId = (page_num, i)
pdf_word_list.append((word_id, content))
coordinate_map[word_id] = Bbox(
page_num,
y_min_line,
y_max_line,
xmin,
xmax,
)
block_coordinates[word_id] = (y_min_block, x_min_block)
i += 1
# sort pdf_word_list by page, block top then block left, top, then left
pdf_word_list = sorted(
pdf_word_list,
key=lambda word_id__: block_coordinates[word_id__[0]]
+ (coordinate_map[word_id__[0]].top, coordinate_map[word_id__[0]].left),
)
return pdf_word_list, coordinate_map
def _extract_html_words(self) -> None:
html_word_list: List[HtmlWord] = []
for sentence in self.sentences:
for i, word in enumerate(sentence.words):
html_word_list.append(((sentence.stable_id, i), word))
self.html_word_list = html_word_list
if self.verbose:
logger.info(f"Extracted {len(self.html_word_list)} html words")
def _link_lists(
self, search_max: int = 100, edit_cost: int = 20, offset_cost: int = 1
) -> None:
# NOTE: there are probably some inefficiencies here from rehashing words
# multiple times, but we're not going to worry about that for now
def link_exact(l: int, u: int) -> None:
l, u, L, U = get_anchors(l, u)
# Inverted index that maps word to index(es) of html_word_list
html_dict: DefaultDict[str, List[int]] = defaultdict(list)
# Inverted index that maps word to index(es) of pdf_word_list
pdf_dict: DefaultDict[str, List[int]] = defaultdict(list)
for i, (_, word) in enumerate(self.html_word_list[l:u]):
if html_to_pdf[l + i] is None:
html_dict[word].append(l + i)
for j, (_, word) in enumerate(self.pdf_word_list[L:U]):
if pdf_to_html[L + j] is None:
pdf_dict[word].append(L + j)
for word, html_list in list(html_dict.items()):
pdf_list = pdf_dict[word]
if len(html_list) == len(pdf_list):
for k in range(len(html_list)):
html_to_pdf[html_list[k]] = pdf_list[k]
pdf_to_html[pdf_list[k]] = html_list[k]
def link_fuzzy(i: int) -> None:
(_, word) = self.html_word_list[i]
l = u = i
l, u, L, U = get_anchors(l, u)
offset = int(L + float(i - l) / (u - l) * (U - L))
searchIndices = np.clip(offset + search_order, 0, M - 1)
cost = [0] * search_max
for j, k in enumerate(searchIndices):
other = self.pdf_word_list[k][1]
if (
word.startswith(other)
or word.endswith(other)
or other.startswith(word)
or other.endswith(word)
):
html_to_pdf[i] = k
return
else:
cost[j] = int(editdist(word, other)) * edit_cost + j * offset_cost
html_to_pdf[i] = searchIndices[np.argmin(cost)]
return
def get_anchors(l: int, u: int) -> Tuple[int, int, int, int]:
while l >= 0 and html_to_pdf[l] is None:
l -= 1
while u < N and html_to_pdf[u] is None:
u += 1
if l < 0:
l = 0
L = 0
else:
L = html_to_pdf[l]
if u >= N:
u = N
U = M
else:
U = html_to_pdf[u]
return l, u, L, U
def display_match_counts() -> int:
matches = sum(
[
html_to_pdf[i] is not None
and self.html_word_list[i][1]
== self.pdf_word_list[html_to_pdf[i]][1]
for i in range(len(self.html_word_list))
]
)
total = len(self.html_word_list)
logger.info(f"({matches}/{total}) = {matches / total:.2f}")
return matches
N = len(self.html_word_list)
M = len(self.pdf_word_list)
try:
assert N > 0 and M > 0
except Exception:
logger.exception(f"N = {N} and M = {M} are invalid values.")
html_to_pdf: List[Optional[int]] = [None] * N
pdf_to_html: List[Optional[int]] = [None] * M
search_radius = search_max // 2
# first pass: global search for exact matches
link_exact(0, N)
if self.verbose:
logger.debug("Global exact matching:")
display_match_counts()
# second pass: local search for exact matches
for i in range(((N + 2) // search_radius) + 1):
link_exact(
max(0, i * search_radius - search_radius),
min(N, i * search_radius + search_radius),
)
if self.verbose:
logger.debug("Local exact matching:")
display_match_counts()
# third pass: local search for approximate matches
search_order = np.array(
[(-1) ** (i % 2) * (i // 2) for i in range(1, search_max + 1)]
)
for i in range(len(html_to_pdf)):
if html_to_pdf[i] is None:
link_fuzzy(i)
if self.verbose:
logger.debug("Local approximate matching:")
display_match_counts()
# convert list to dict
matches = sum(
[
html_to_pdf[i] is not None
and self.html_word_list[i][1] == self.pdf_word_list[html_to_pdf[i]][1]
for i in range(len(self.html_word_list))
]
)
total = len(self.html_word_list)
if self.verbose:
logger.debug(
f"Linked {matches}/{total} ({matches / total:.2f}) html words exactly"
)
self.links = OrderedDict(
(self.html_word_list[i][0], self.pdf_word_list[html_to_pdf[i]][0])
for i in range(len(self.html_word_list))
)
def _update_coordinates(self) -> Iterator[Sentence]:
for sentence in self.sentences:
(page, top, bottom, left, right) = list(
zip(
*[
self.coordinate_map[self.links[((sentence.stable_id), i)]]
for i in range(len(sentence.words))
]
)
)
sentence.page = list(page)
sentence.top = list(top)
sentence.left = list(left)
sentence.bottom = list(bottom)
sentence.right = list(right)
yield sentence
if self.verbose:
logger.debug("Updated coordinates in database")
| fonduer-master | src/fonduer/parser/visual_parser/pdf_visual_parser.py |
"""Abstract visual parser."""
from abc import ABC, abstractmethod
from typing import Iterable, Iterator
from fonduer.parser.models import Sentence
class VisualParser(ABC):
"""Abstract visual parer."""
@abstractmethod
def parse(
self,
document_name: str,
sentences: Iterable[Sentence],
) -> Iterator[Sentence]:
"""Parse visual information and link them with given sentences.
:param document_name: the document name.
:param sentences: sentences to be linked with visual information.
:yield: sentences with visual information.
"""
pass
@abstractmethod
def is_parsable(self, document_name: str) -> bool:
"""Check if visual information can be parsed.
:param document_name: the document name.
:return: Whether visual information is parsable.
"""
pass
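# Hedged sketch (not part of the module above): a custom visual parser only has
# to implement the two abstract methods. This no-op variant passes sentences
# through unchanged and claims every document is parsable.
class NoOpVisualParser(VisualParser):
    """A visual parser that attaches no visual information."""
    def parse(
        self, document_name: str, sentences: Iterable[Sentence]
    ) -> Iterator[Sentence]:
        yield from sentences
    def is_parsable(self, document_name: str) -> bool:
        return True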
| fonduer-master | src/fonduer/parser/visual_parser/visual_parser.py |
"""Fonduer's supervision module."""
from fonduer.supervision.labeler import Labeler
__all__ = ["Labeler"]
| fonduer-master | src/fonduer/supervision/__init__.py |
"""Fonduer labeler."""
import logging
from collections import defaultdict
from typing import (
Any,
Callable,
Collection,
DefaultDict,
Dict,
Iterable,
Iterator,
List,
Optional,
Tuple,
Type,
Union,
)
import numpy as np
from sqlalchemy import Table
from sqlalchemy.orm import Session
from fonduer.candidates.models import Candidate
from fonduer.parser.models import Document
from fonduer.supervision.models import GoldLabelKey, Label, LabelKey
from fonduer.utils.udf import UDF, UDFRunner
from fonduer.utils.utils_udf import (
ALL_SPLITS,
batch_upsert_records,
drop_all_keys,
drop_keys,
get_docs_from_split,
get_mapping,
get_sparse_matrix,
get_sparse_matrix_keys,
unshift_label_matrix,
upsert_keys,
)
logger = logging.getLogger(__name__)
# Snorkel changed the label convention: ABSTAIN is now represented by -1 (used to be 0).
# Accordingly, user-defined labels should now be 0-indexed (used to be 1-indexed).
# Details can be found at https://github.com/snorkel-team/snorkel/pull/1309
ABSTAIN = -1
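# Hedged illustration (not part of the Fonduer source): with this convention, a
# labeling function for a binary candidate class returns 0 or 1 for a concrete
# label and ABSTAIN (or None) to abstain. ``looks_positive`` is a hypothetical
# stand-in for real feature logic over a candidate.
def looks_positive(cand):
    return False
def example_lf(cand):
    return 1 if looks_positive(cand) else ABSTAIN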
class Labeler(UDFRunner):
"""An operator to add Label Annotations to Candidates.
:param session: The database session to use.
:param candidate_classes: A list of candidate_subclasses to label.
:param parallelism: The number of processes to use in parallel. Default 1.
"""
def __init__(
self,
session: Session,
candidate_classes: List[Type[Candidate]],
parallelism: int = 1,
):
"""Initialize the Labeler."""
super().__init__(
session,
LabelerUDF,
parallelism=parallelism,
candidate_classes=candidate_classes,
)
self.candidate_classes = candidate_classes
self.lfs: List[List[Callable]] = []
def update(
self,
docs: Collection[Document] = None,
split: int = 0,
lfs: List[List[Callable]] = None,
parallelism: int = None,
progress_bar: bool = True,
table: Table = Label,
) -> None:
"""Update the labels of the specified candidates based on the provided LFs.
:param docs: If provided, apply the updated LFs to all the candidates
in these documents.
:param split: If docs is None, apply the updated LFs to the candidates
in this particular split.
:param lfs: A list of lists of labeling functions to update. Each list
should correspond with the candidate_classes used to initialize the
Labeler.
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the Labeler if
it is provided.
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
:param table: A (database) table labels are written to.
Takes `Label` (by default) or `GoldLabel`.
"""
if lfs is None:
raise ValueError("Please provide a list of lists of labeling functions.")
if len(lfs) != len(self.candidate_classes):
raise ValueError("Please provide LFs for each candidate class.")
self.table = table
self.apply(
docs=docs,
split=split,
lfs=lfs,
train=True,
clear=False,
parallelism=parallelism,
progress_bar=progress_bar,
table=table,
)
def apply( # type: ignore
self,
docs: Collection[Document] = None,
split: int = 0,
train: bool = False,
lfs: List[List[Callable]] = None,
clear: bool = True,
parallelism: int = None,
progress_bar: bool = True,
table: Table = Label,
) -> None:
"""Apply the labels of the specified candidates based on the provided LFs.
:param docs: If provided, apply the LFs to all the candidates in these
documents.
:param split: If docs is None, apply the LFs to the candidates in this
particular split.
:param train: Whether or not to update the global key set of labels and
the labels of candidates.
:param lfs: A list of lists of labeling functions to apply. Each list
should correspond with the candidate_classes used to initialize the
Labeler.
:param clear: Whether or not to clear the labels table before applying
these LFs.
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the Labeler if
it is provided.
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
:param table: A (database) table labels are written to.
Takes `Label` (by default) or `GoldLabel`.
:raises ValueError: If labeling functions are not provided for each
candidate class.
"""
if lfs is None:
raise ValueError("Please provide a list of labeling functions.")
if len(lfs) != len(self.candidate_classes):
raise ValueError("Please provide LFs for each candidate class.")
self.lfs = lfs
self.table = table
if docs:
# Call apply on the specified docs for all splits
# TODO: split is int
split = ALL_SPLITS # type: ignore
super().apply(
docs,
split=split,
train=train,
lfs=self.lfs,
clear=clear,
parallelism=parallelism,
progress_bar=progress_bar,
table=table,
)
# Needed to sync the bulk operations
self.session.commit()
else:
# Only grab the docs containing candidates from the given split.
split_docs = get_docs_from_split(
self.session, self.candidate_classes, split
)
super().apply(
split_docs,
split=split,
train=train,
lfs=self.lfs,
clear=clear,
parallelism=parallelism,
progress_bar=progress_bar,
table=table,
)
# Needed to sync the bulk operations
self.session.commit()
def get_keys(self) -> List[LabelKey]:
"""Return a list of keys for the Labels.
:return: List of LabelKeys.
"""
return list(get_sparse_matrix_keys(self.session, LabelKey))
def upsert_keys(
self,
keys: Iterable[Union[str, Callable]],
candidate_classes: Optional[
Union[Type[Candidate], List[Type[Candidate]]]
] = None,
) -> None:
"""Upsert the specified keys from LabelKeys.
:param keys: A list of labeling functions to upsert.
:param candidate_classes: A list of the Candidates to upsert the key for.
If None, upsert the keys for all candidate classes associated with
this Labeler.
"""
# Make sure keys is iterable
keys = keys if isinstance(keys, (list, tuple)) else [keys]
# Make sure candidate_classes is iterable
if candidate_classes:
candidate_classes = (
candidate_classes
if isinstance(candidate_classes, (list, tuple))
else [candidate_classes]
)
# Ensure only candidate classes associated with the labeler are used.
candidate_classes = [
_.__tablename__
for _ in candidate_classes
if _ in self.candidate_classes
]
if len(candidate_classes) == 0:
logger.warning(
"You didn't specify valid candidate classes for this Labeler."
)
return
# If unspecified, just use all candidate classes
else:
candidate_classes = [_.__tablename__ for _ in self.candidate_classes]
# build dict for use by utils
key_map = dict()
for key in keys:
# Assume key is an LF
if hasattr(key, "__name__"):
key_map[key.__name__] = set(candidate_classes)
elif hasattr(key, "name"):
key_map[key.name] = set(candidate_classes)
else:
key_map[key] = set(candidate_classes)
upsert_keys(self.session, LabelKey, key_map)
def drop_keys(
self,
keys: Iterable[Union[str, Callable]],
candidate_classes: Optional[
Union[Type[Candidate], List[Type[Candidate]]]
] = None,
) -> None:
"""Drop the specified keys from LabelKeys.
:param keys: A list of labeling functions to delete.
:param candidate_classes: A list of the Candidates to drop the key for.
If None, drops the keys for all candidate classes associated with
this Labeler.
"""
# Make sure keys is iterable
keys = keys if isinstance(keys, (list, tuple)) else [keys]
# Make sure candidate_classes is iterable
if candidate_classes:
candidate_classes = (
candidate_classes
if isinstance(candidate_classes, (list, tuple))
else [candidate_classes]
)
# Ensure only candidate classes associated with the labeler are used.
candidate_classes = [
_.__tablename__
for _ in candidate_classes
if _ in self.candidate_classes
]
if len(candidate_classes) == 0:
logger.warning(
"You didn't specify valid candidate classes for this Labeler."
)
return
# If unspecified, just use all candidate classes
else:
candidate_classes = [_.__tablename__ for _ in self.candidate_classes]
# build dict for use by utils
key_map = dict()
for key in keys:
# Assume key is an LF
if hasattr(key, "__name__"):
key_map[key.__name__] = set(candidate_classes)
elif hasattr(key, "name"):
key_map[key.name] = set(candidate_classes)
else:
key_map[key] = set(candidate_classes)
drop_keys(self.session, LabelKey, key_map)
def _add(self, session: Session, records_list: List[List[Dict[str, Any]]]) -> None:
for records in records_list:
batch_upsert_records(session, self.table, records)
def clear( # type: ignore
self,
train: bool,
split: int,
lfs: Optional[List[List[Callable]]] = None,
table: Table = Label,
**kwargs: Any,
) -> None:
"""Delete Labels of each class from the database.
:param train: Whether or not to clear the LabelKeys.
:param split: Which split of candidates to clear labels from.
:param lfs: This parameter is ignored.
:param table: A (database) table labels are cleared from.
Takes `Label` (by default) or `GoldLabel`.
"""
# Clear Labels for the candidates in the split passed in.
logger.info(f"Clearing Labels (split {split})")
if split == ALL_SPLITS:
sub_query = self.session.query(Candidate.id).subquery()
else:
sub_query = (
self.session.query(Candidate.id)
.filter(Candidate.split == split)
.subquery()
)
query = self.session.query(table).filter(table.candidate_id.in_(sub_query))
query.delete(synchronize_session="fetch")
# Delete all old annotation keys
if train:
key_table = LabelKey if table == Label else GoldLabelKey
logger.debug(
f"Clearing all {key_table.__name__}s from {self.candidate_classes}..."
)
drop_all_keys(self.session, key_table, self.candidate_classes)
def clear_all(self, table: Table = Label) -> None:
"""Delete all Labels.
:param table: A (database) table labels are cleared from.
Takes `Label` (by default) or `GoldLabel`.
"""
key_table = LabelKey if table == Label else GoldLabelKey
logger.info(f"Clearing ALL {table.__name__}s and {key_table.__name__}s.")
self.session.query(table).delete(synchronize_session="fetch")
self.session.query(key_table).delete(synchronize_session="fetch")
def _after_apply(
self, train: bool = False, table: Table = Label, **kwargs: Any
) -> None:
# Insert all Label Keys
if train:
key_map: DefaultDict[str, set] = defaultdict(set)
for label in self.session.query(table).all():
cand = label.candidate
for key in label.keys:
key_map[key].add(cand.__class__.__tablename__)
key_table = LabelKey if table == Label else GoldLabelKey
self.session.query(key_table).delete(synchronize_session="fetch")
# TODO: upsert is too much. insert is fine as all keys are deleted.
upsert_keys(self.session, key_table, key_map)
def get_gold_labels(
self, cand_lists: List[List[Candidate]], annotator: Optional[str] = None
) -> List[np.ndarray]:
"""Load dense matrix of GoldLabels for each candidate_class.
:param cand_lists: The candidates to get gold labels for.
:param annotator: A specific annotator key to get labels for. Default
None.
:raises ValueError: If get_gold_labels is called before gold labels are
loaded, the result will contain ABSTAIN values. We raise a
ValueError to help indicate this potential mistake to the user.
:return: A list of MxN dense matrix where M are the candidates and N is the
annotators. If annotator is provided, return a list of Mx1 matrix.
"""
gold_labels = [
unshift_label_matrix(m)
for m in get_sparse_matrix(
self.session, GoldLabelKey, cand_lists, key=annotator
)
]
for cand_labels in gold_labels:
if ABSTAIN in cand_labels:
raise ValueError(
"Gold labels contain ABSTAIN labels. "
"Did you load gold labels beforehand?"
)
return gold_labels
def get_label_matrices(self, cand_lists: List[List[Candidate]]) -> List[np.ndarray]:
"""Load dense matrix of Labels for each candidate_class.
:param cand_lists: The candidates to get labels for.
:return: A list of MxN dense matrix where M are the candidates and N is the
labeling functions.
"""
return [
unshift_label_matrix(m)
for m in get_sparse_matrix(self.session, LabelKey, cand_lists)
]
class LabelerUDF(UDF):
"""UDF for performing candidate extraction."""
def __init__(
self,
candidate_classes: Union[Type[Candidate], List[Type[Candidate]]],
**kwargs: Any,
):
"""Initialize the LabelerUDF."""
self.candidate_classes = (
candidate_classes
if isinstance(candidate_classes, (list, tuple))
else [candidate_classes]
)
super().__init__(**kwargs)
def _f_gen(self, c: Candidate) -> Iterator[Tuple[int, str, int]]:
"""Convert lfs into a generator of id, name, and labels.
In particular, catch verbose values and convert to integer ones.
"""
lf_idx = self.candidate_classes.index(c.__class__)
labels = lambda c: [
(
c.id,
lf.__name__ if hasattr(lf, "__name__") else lf.name, # type: ignore
lf(c),
)
for lf in self.lfs[lf_idx]
]
for cid, lf_key, label in labels(c):
# Note: We assume if the LF output is an int, it is already
# mapped correctly
if isinstance(label, int):
yield cid, lf_key, label + 1 # convert to {0, 1, ..., k}
# None is a protected LF output value corresponding to ABSTAIN,
# representing LF abstaining
elif label is None:
yield cid, lf_key, ABSTAIN + 1 # convert to {0, 1, ..., k}
elif label in c.values:
# convert to {0, 1, ..., k}
yield cid, lf_key, c.values.index(label) + 1
else:
raise ValueError(
f"Can't parse label value {label} for candidate values {c.values}"
)
def apply( # type: ignore
self,
doc: Document,
lfs: List[List[Callable]],
table: Table = Label,
**kwargs: Any,
) -> List[List[Dict[str, Any]]]:
"""Extract candidates from the given Context.
:param doc: A document to process.
:param lfs: The list of functions to use to generate labels.
"""
logger.debug(f"Document: {doc}")
if lfs is None:
raise ValueError("Must provide lfs kwarg.")
self.lfs = lfs
# Get all the candidates in this doc that will be labeled
cands_list = [
getattr(doc, candidate_class.__tablename__ + "s")
for candidate_class in self.candidate_classes
]
records_list = [
list(get_mapping(table, cands, self._f_gen)) for cands in cands_list
]
return records_list
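# Hedged usage sketch (illustration only): ``session``, ``PartTempCand``,
# ``lfs`` and ``train_cands`` are assumptions standing in for a real Fonduer
# session, candidate subclass, user-defined labeling functions and candidates.
def run_labeler(session, PartTempCand, lfs, train_cands):
    labeler = Labeler(session, [PartTempCand])
    # Apply the LFs to the train split and record their keys (train=True).
    labeler.apply(split=0, lfs=[lfs], train=True)
    # Retrieve one dense (num candidates x num LFs) matrix per candidate class.
    return labeler.get_label_matrices([train_cands])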
| fonduer-master | src/fonduer/supervision/labeler.py |
"""Fonduer's supervision model module."""
from fonduer.supervision.models.label import (
GoldLabel,
GoldLabelKey,
Label,
LabelKey,
StableLabel,
)
__all__ = ["GoldLabel", "GoldLabelKey", "Label", "LabelKey", "StableLabel"]
| fonduer-master | src/fonduer/supervision/models/__init__.py |
"""Fonduer label model."""
from sqlalchemy import Column, Integer, String
from sqlalchemy.dialects import postgresql
from fonduer.meta import Meta
from fonduer.utils.models.annotation import AnnotationKeyMixin, AnnotationMixin
class GoldLabelKey(AnnotationKeyMixin, Meta.Base):
"""Gold label key class.
A gold label's key that identifies the annotator of the gold label.
"""
pass
class GoldLabel(AnnotationMixin, Meta.Base):
"""Gold label class.
A separate class for labels from human annotators or other gold standards.
"""
#: A list of integer values for each Key.
values = Column(postgresql.ARRAY(Integer), nullable=False)
class LabelKey(AnnotationKeyMixin, Meta.Base):
"""Label key class.
A label's key that identifies the labeling function.
"""
pass
class Label(AnnotationMixin, Meta.Base):
"""Label class.
A discrete label associated with a Candidate, indicating a target prediction value.
Labels are used to represent the output of labeling functions. A Label's
annotation key identifies the labeling function that provided the Label.
"""
#: A list of integer values for each Key.
values = Column(postgresql.ARRAY(Integer), nullable=False)
class StableLabel(Meta.Base):
"""Stable label table.
A special secondary table for preserving labels created by *human
annotators* in a stable format that does not cascade, and is independent of
the Candidate IDs.
.. note:: This is currently unused.
"""
__tablename__ = "stable_label"
#: Delimited list of the context stable ids.
context_stable_ids = Column(
String, primary_key=True
) # ~~ delimited list of the context stable ids
#: The annotator's name
annotator_name = Column(String, primary_key=True)
#: Which split the label belongs to
split = Column(Integer, default=0)
# The value of the label
value = Column(Integer, nullable=False)
def __repr__(self) -> str:
"""Represent the stable label as a string."""
return f"{self.__class__.__name__} ({self.annotator_name} : {self.value})"
| fonduer-master | src/fonduer/supervision/models/label.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import setuptools
setuptools.setup(
name="ctrl-benchmark",
version="0.0.3",
author="Tom Veniat, Ludovic Denoyer & Marc'Aurelio Ranzato",
license="MIT License",
description="Continual Transfer Learning Benchmark",
packages=setuptools.find_packages(),
install_requires=[
'pyyaml',
'torch>=1.3,<2',
'torchvision<1',
'networkx>2,<3',
'plotly',
'pydot',
'tqdm',
'sklearn',
'bs4'
],
include_package_data=True,
) | CTrLBenchmark-master | setup.py |
from .streams import get_stream | CTrLBenchmark-master | ctrl/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import random
from collections import defaultdict
import torch
import torchvision
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from torch.utils.data import TensorDataset
logger = logging.getLogger(__name__)
class Task(object):
def __init__(self, name, samples, loss_fn, transformation=None,
split_names=None, creator=None, source_concepts=None,
attributes=None, dim_red='PCA', generator=None,
n_samples_per_class=None, save_path=None, id=None):
"""
:param samples: Iterable containing the data and labels for each split. The length corresponds to the number of
splits. Each split i should be composed of two Tensors:
- a `(N_i x ...)` tensor containing the features of the N_i samples for this splits
            - a `(N_i x n_labels)` tensor containing the labels for each attribute that we want to classify. The attributes in different splits are not forced to overlap, which allows the generation of ZSL tasks.
:param transformation:
:param creator:
"""
self._infos = {
'src_concepts': [] if source_concepts is None else source_concepts,
'transformation': transformation,
'attributes': attributes
}
self.name = name
self.save_path = None
self.loss_fn = loss_fn
self.id = id
self.split_names = split_names
self.datasets = [TensorDataset(s_samples, labels.long()) for
s_samples, labels in samples]
self.n_classes = [dataset.tensors[1].max(dim=0).values + 1 for dataset
in self.datasets]
self.x_dim = list(self.datasets[0].tensors[0].size()[1:])
assert all(list(split.tensors[0].size()[1:]) == self.x_dim for split in
self.datasets)
self.n_samples = [dataset.tensors[0].size(0) for dataset in
self.datasets]
self.n_samples_per_class = n_samples_per_class
assert all([torch.equal(self.n_classes[0], t) for t in self.n_classes])
self.n_classes = self.n_classes[0]
self._dim_reduction = PCA(n_components=3) \
if dim_red == 'PCA' else TSNE(n_components=3)
self.creator = creator
# self.generator = generator
self.statistics = self.compute_statistics()
if save_path:
self.save_path = self.save(save_path)
def compute_statistics(self):
train_split = self.datasets[0].tensors[0]
if train_split[0].dim() == 3:
# Images
# assert train_split.size(1) == 3
n_channels = train_split.size(1)
mean = [train_split[:, i, :, :].mean() for i in range(n_channels)]
std = [train_split[:, i, :, :].std() for i in range(n_channels)]
else:
# Vectors
mean = train_split.mean()
std = train_split.std()
# Prevent division by 0 if we have a constant channel
std = [1 if itm == 0 else itm for itm in std]
return {'mean': mean, 'std': std}
@property
def concepts(self):
return [concept for cat_concepts in self.src_concepts for concept in
cat_concepts]
@property
def transformation(self):
return self._infos['transformation']
@property
def src_concepts(self):
"""
:return: A copy of the concepts list of this task
"""
return self._infos['src_concepts'].copy()
@property
def attributes(self):
return self._infos['attributes']
    def get_data(self, split: int):
        """Return the features tensor of the given split.
        :param split: index of the split (e.g. 0 for train, 1 for val, 2 for test).
        :return: the tensor containing the samples of that split.
        """
return self.datasets[split].tensors[0]
def get_labels(self, split, prop):
return self.datasets[split].tensors[1][:, prop]
def plot_task(self, viz, name):
legend = [str(c) for c in self.src_concepts]
selected_means = []
cat_ids = []
for cat_id, cat in enumerate(self.src_concepts):
for c in cat:
if hasattr(c, 'mean'):
selected_means.append(c.mean)
cat_ids.append(cat_id + 1)
if len(selected_means) > 2:
data = torch.stack(selected_means)
title = '{} selected concepts'.format(name)
if selected_means[0].numel() > 3:
title = '{} of {}'.format(
self._dim_reduction.__class__.__name__, title)
data = self._dim_reduction.fit_transform(data)
viz.scatter(data, Y=cat_ids,
opts={'title': title, 'markersize': 3,
'legend': legend})
plot_data = self.get_data(split=0)
title = '{} features'.format(name)
if plot_data[0].ndimension() == 3 and plot_data[0].size(0) in [1, 3]:
# We have an image
imgs_per_label = defaultdict(list)
for ds in self.datasets:
x, y = ds.tensors
y = y.squeeze()
for y_val in y.unique():
x_sample = random.choice(x[y == y_val])
imgs_per_label[y_val.item()].append(x_sample)
for y, images in imgs_per_label.items():
grid = torchvision.utils.make_grid(images)
viz.image(grid, opts={
'title': '{} ({})'.format(self.src_concepts[y], y),
'width': grid.size(2) * 3,
'height': grid.size(1) * 3.2})
else:
# Vectorial data
if plot_data[0].numel() > 3:
plot_data = self._dim_reduction.fit_transform(
plot_data.view(plot_data.size(0), -1))
title = '{} of {}'.format(
self._dim_reduction.__class__.__name__, title)
viz.scatter(plot_data, Y=self.get_labels(split=0, prop=0) + 1,
opts={'title': title, 'webgl': True, 'markersize': 3,
'legend': legend})
def save(self, path):
if not os.path.isdir(path):
os.makedirs(path)
task_datasets = []
save_paths = []
for split_data, split_name in zip(self.datasets,
['train', 'val', 'test']):
save_path = os.path.join(path,
'{}_{}.pth'.format(self.name, split_name))
save_paths.append(save_path)
torch.save(split_data.tensors, save_path)
task_datasets.append(save_path)
logger.info('Task saved to {} ...'.format(save_paths))
metadata_file = os.path.join(path, '{}.meta'.format(self.name))
torch.save(self._meta(), metadata_file)
return task_datasets
def _meta(self):
meta = {
'source_concepts': [tuple(str(c) for c in cat) for cat in
self.src_concepts],
'transformation': str(self.transformation),
'creator': self.creator
}
return meta
def info(self, full=True):
infos = {
'data_path': self.save_path,
'split_names': self.split_names,
'id': self.id,
'x_dim': self.x_dim,
'n_classes': self.n_classes.tolist(),
'descriptor': self.name,
'full_descr': str(self),
}
if full:
infos['loss_fn'] = self.loss_fn
infos['statistics'] = self.statistics
return infos
def __repr__(self):
return "{}-way classification".format(len(self.src_concepts))
def __str__(self):
categories = '\n\t-'.join([str(c) for c in self.src_concepts])
descr = "{}-way classification created by {} ({} samples): \n\t {} \n\t-{}"
trans_descr = self.transformation
return descr.format(self.n_classes[0].item(), self.creator,
self.n_samples, trans_descr, categories)
def __eq__(self, other):
return all(
map(lambda x: torch.equal(*x),
zip(self.datasets[0].tensors,
other.datasets[0].tensors)))
| CTrLBenchmark-master | ctrl/tasks/task.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| CTrLBenchmark-master | ctrl/tasks/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import random
import time
from types import SimpleNamespace
import numpy as np
import torch
import torch.nn.functional as F
from ctrl.concepts.concept import ComposedConcept
from ctrl.concepts.concept_tree import ConceptTree
from ctrl.tasks.task import Task
from torchvision import transforms
logger = logging.getLogger(__name__)
def loss(y_hat, y, reduction: str = 'none'):
"""
:param y_hat: Model predictions
:param y: Ground Truth
:param reduction:
:return:
"""
assert y.size(1) == 1 and torch.is_tensor(y_hat)
y = y.squeeze(1)
loss_val = F.cross_entropy(y_hat, y, reduction=reduction)
assert loss_val.dim() == 1
return loss_val
def augment_samples(samples):
trans = transforms.Compose(
[
transforms.ToPILImage(),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor()
])
aug_samples = []
for sample in samples:
for i in range(4):
aug_samples.append(trans(sample))
for sample in samples:
aug_samples.append(transforms.ToTensor()(transforms.ToPILImage()(sample)))
return torch.stack(aug_samples)
def _generate_samples_from_descr(categories, attributes, n_samples_per_class,
augment, rnd):
use_cat_id, attributes = attributes
assert use_cat_id and not attributes, \
"usage of attributes isn't supporte in v1."
samples = []
labels = []
for i, cat_concepts in enumerate(categories):
mixture = ComposedConcept(cat_concepts, id=None)
cat_samples = []
cat_labels = []
for s_id, n in enumerate(n_samples_per_class):
split_samples, split_attrs = mixture._get_samples(n, attributes,
split_id=s_id, rng=rnd)
if s_id in augment:
split_samples = augment_samples(split_samples)
split_labels = torch.Tensor().long()
cat_id = torch.tensor([i]).expand(split_samples.shape[0], 1)
split_labels = torch.cat([split_labels, cat_id], dim=1)
cat_samples.append(split_samples)
cat_labels.append(split_labels)
samples.append(cat_samples)
labels.append(cat_labels)
if torch.is_tensor(samples[0][0]):
cat_func = torch.cat
else:
cat_func = np.concatenate
samples = (cat_func(split) for split in zip(*samples))
labels = (torch.cat(split) for split in zip(*labels))
return samples, labels
class TaskGenIter(object):
def __init__(self, task_generator):
self.task_gen = task_generator
self.n = 0
def __next__(self):
if len(self.task_gen.task_pool) > self.n:
t = self.task_gen.task_pool[self.n]
else:
assert self.n == len(self.task_gen.task_pool)
try:
t = self.task_gen.add_task()
except IndexError:
raise StopIteration
self.n += 1
return t
class TaskGenerator(object):
def __init__(self, concept_pool: ConceptTree, transformation_pool,
samples_per_class, split_names, strat,
seed: int, flatten, n_initial_classes, use_cat_id, tta,
*args, **kwargs):
"""
:param concepts: Concept pool from which we will sample when creating
new tasks.
:param transformation_pool: Transformation pool from which we will
select the operations to be applied on the data of new tasks.
:param samples_per_class: Initial number of samples per class
:param split_names: Name of the different data splits usually
(train, val, test)
:param strat: Strategy to use for the creation of new tasks
:param seed: The seed used for the samples selection
:param flatten:
:param n_initial_classes:
:param use_cat_id: Legacy prop used with attributes.
:param tta: use Test Time Augmentation
"""
super(TaskGenerator, self).__init__(*args, **kwargs)
self.task_pool = []
self.concept_pool = concept_pool
self.transformation_pool = transformation_pool
assert len(samples_per_class) == len(split_names)
self.n_samples_per_class = samples_per_class
self.split_names = split_names
self.rnd = random.Random(seed)
self.flatten = flatten
self.tta = tta
# For default task creation
self.n_initial_classes = n_initial_classes
self.use_cat_id = use_cat_id
self.strat = strat
self.contains_loaded_tasks = False
@property
def n_tasks(self):
return len(self.task_pool)
def add_task(self, name=None, save_path=None):
"""
Adds a new task to the current pool.
This task will be created using the current strategy `self.strat`
:param name: The name of the new task
:param save_path: If provided, the task will be saved under this path
:return: The new Task
"""
new_task_id = len(self.task_pool)
if new_task_id == 0:
concepts, attrs, trans, n = self._create_new_task(
self.concept_pool, self.transformation_pool)
else:
concepts = self.task_pool[-1].src_concepts
attrs = self.task_pool[-1].attributes
trans = self.task_pool[-1].transformation
n = self.task_pool[-1].n_samples_per_class
cur_task_spec = SimpleNamespace(src_concepts=concepts,
attributes=attrs,
transformation=trans,
n_samples_per_class=n,
)
cur_task_spec = self.strat.new_task(cur_task_spec, self.concept_pool,
self.transformation_pool,
self.task_pool)
assert len(cur_task_spec.n_samples_per_class) == len(self.split_names)
new_task = self._create_task(cur_task_spec, name, save_path)
new_task.id = new_task_id
self.task_pool.append(new_task)
return new_task
def load_task(self, task_name, load_path):
splits = ['train', 'val', 'test']
samples = []
save_paths = []
for split in splits:
file_path = os.path.join(load_path, '{}_{}.pth'.format(task_name, split))
save_paths.append(file_path)
assert os.path.isfile(file_path), file_path
xs, ys = torch.load(file_path)
samples.append((xs, ys))
metadata_file = os.path.join(load_path, '{}.meta'.format(task_name))
if os.path.isfile(metadata_file):
meta = torch.load(metadata_file)
else:
meta = {}
task = Task(task_name, samples, loss, split_names=self.split_names,
id=len(self.task_pool), **meta)
task.save_path = save_paths
self.task_pool.append(task)
self.contains_loaded_tasks = True
return task
def _create_task(self, task_spec, name, save_path):
concepts = task_spec.src_concepts
attributes = task_spec.attributes
transformation = task_spec.transformation
n_samples_per_class = task_spec.n_samples_per_class
samples = self.get_samples(concepts, attributes, transformation,
n_samples_per_class)
if self.flatten:
samples = [(x.view(x.size(0), -1), y) for x, y in samples]
task = Task(name, samples, loss, transformation, self.split_names,
source_concepts=concepts, attributes=attributes,
creator=self.strat.descr(), generator=self,
n_samples_per_class=n_samples_per_class,
save_path=save_path)
return task
def get_similarities(self, component=None):
"""
:param component: String representing the components across which the
similarities should be computed, can be any combination of :
- 'x' for p(x|z)
- 'y' for p(y|z)
- 'z' for p(z)
:return: A dict associating each component to an n_tasks x n_tasks
tensor containing the similarities between tasks over this component.
"""
if component is None:
component = 'xyz'
similarities = torch.zeros(self.n_tasks, self.n_tasks, len(component))
times = torch.zeros(len(component))
for i, t1 in enumerate(self.task_pool):
for j, t2 in enumerate(self.task_pool[i:]):
sim, time = self.get_similarity(t1, t2, component)
sim = torch.tensor(sim)
# Similarities are symmetric
similarities[i, i + j] = sim
similarities[i + j, i] = sim
times += torch.tensor(time)
for comp, time in zip(component, times.unbind()):
if time > 1:
logger.warning(
"Comparison of {} took {:4.2f}s".format(comp, time))
sim_dict = dict(zip(component, similarities.unbind(-1)))
return sim_dict
def get_similarity(self, t1, t2, component=None):
if component is None:
component = 'xyz'
res = []
times = []
for char in component:
start_time = time.time()
if char == 'x':
res.append(self.transformation_pool.transformations_sim(
t1.transformation, t2.transformation))
elif char == 'y':
res.append(self.concept_pool.y_attributes_sim(t1.attributes,
t2.attributes))
elif char == 'z':
res.append(self.concept_pool.categories_sim(t1.src_concepts,
t2.src_concepts))
else:
raise ValueError('Unknown component {}'.format(char))
times.append(time.time() - start_time)
return res, times
def get_samples(self, concepts, attributes, transformation,
n_samples_per_class):
augment = [1] if self.tta else []
samples, labels = _generate_samples_from_descr(concepts, attributes,
n_samples_per_class,
augment, np.random.default_rng(self.rnd.randint(0, int(1e9))))
# Apply the input transformation
samples = [transformation(x) for x in samples]
return [(x, y) for x, y in zip(samples, labels)]
def stream_infos(self, full=True):
"""
return a list containing the information of each task in the task_pool,
useful when the stream needs to be serialized (e.g. to be sent to
workers.)
"""
return [t.info(full) for t in self.task_pool]
def _create_new_task(self, concept_pool, transformation_pool, n_attributes=0):
logger.info('Creating new task from scratch')
concepts = concept_pool.get_compatible_concepts(self.n_initial_classes,
leaf_only=True,)
n_avail_attrs = len(concept_pool.attributes)
if n_attributes > n_avail_attrs:
raise ValueError('Can\'t select {} attributes, only {} available'
.format(n_attributes, n_avail_attrs))
attributes = self.rnd.sample(range(n_avail_attrs), n_attributes)
transformation = transformation_pool.get_transformation()
concepts = [(c,) for c in concepts]
return concepts, (self.use_cat_id, attributes), transformation, \
self.n_samples_per_class
def __str__(self):
descr = "Task stream containing {} tasks:\n\t".format(self.n_tasks)
tasks = '\n\t'.join(map(str, self.task_pool))
return descr + tasks
def __iter__(self):
return TaskGenIter(self)
| CTrLBenchmark-master | ctrl/tasks/task_generator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from ctrl.transformations.transformation_tree import TransformationTree
from torch import nn
from tqdm import tqdm
class NoisyNNTransformationTree(TransformationTree):
def __init__(self, noise_min, noise_max, x_dim, z_dim, n_canonic_transfo,
n_var_per_trans, *args, **kwargs):
self.noise_min = noise_min
self.noise_max = noise_max
self.x_dim = x_dim
self.z_dim = z_dim
self.n_canonic_transfo = n_canonic_transfo
self.n_var_per_trans = n_var_per_trans
self.depth = 2
super().__init__(*args, **kwargs)
self._inv_index = {v: k for k, v in self._node_index.items()}
def build_tree(self):
first_module = nn.Sequential(nn.Linear(self.z_dim, self.z_dim),
nn.ReLU())
# node_name = '{}{}'.format(self.name, 'front')
node_name = 'front'
self.tree.add_node(self._node_index[self.name], name=self.name)
self.tree.add_node(self._node_index[node_name], name=node_name)
self.tree.add_edge(self._node_index[self.name],
self._node_index[node_name], f=first_module)
noise_source = torch.distributions.uniform.Uniform(self.noise_min,
self.noise_max)
for i in tqdm(range(self.n_canonic_transfo), desc='Init noisy x',
disable=self.n_canonic_transfo < 30):
lin = nn.Linear(self.z_dim, self.x_dim)
for j in range(self.n_var_per_trans):
mod = mod_lin(lin, noise_source)
node_name = (i, j)
self.tree.add_node(self._node_index[node_name], name=str(node_name))
self.tree.add_edge(self._node_index['front'],
self._node_index[node_name],
f=nn.Sequential(mod, nn.ReLU()))
self.leaf_nodes.add(self._node_index[node_name])
return self._node_index[self.name]
def transformations_sim(self, t1, t2):
t1 = self._inv_index[t1.path[-1]]
t2 = self._inv_index[t2.path[-1]]
return 0 if t1[0] != t2[0] else 1
def mod_lin(lin, noise_source):
noise = noise_source.sample(lin.weight.size())
new_lin = nn.Linear(lin.in_features, lin.out_features)
state_dict = lin.state_dict()
state_dict['weight'] = state_dict['weight'] + noise
new_lin.load_state_dict(state_dict)
return new_lin
| CTrLBenchmark-master | ctrl/transformations/noisy_nn_transformation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class Transformation(object):
def __init__(self, transfo_pool, path, trans_descr):
assert path[0] == transfo_pool.root_node
self.transfo_pool = transfo_pool
self.path = path
self.trans_descr = trans_descr
def __call__(self, X):
with torch.no_grad():
for u, v in zip(self.path, self.path[1:]):
f = self.transfo_pool.tree.edges()[u, v]['f']
X = f(X)
return X
def __str__(self):
        return self.trans_descr
| CTrLBenchmark-master | ctrl/transformations/transformation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
import torch
from ctrl.transformations.transformation_tree import TransformationTree
from ctrl.transformations.utils import BatchedTransformation
from torchvision import transforms
from torchvision.transforms import RandomAffine
ROTATIONS = {
'0': 0,
# '90': 90,
# '180': 180,
# '270': 270
}
COLORS = [[255, 0, 0], [0, 255, 0], [0, 0, 255]]
OLD_BACKGOUND = [0]
SCALES = {
'full': 1,
# '3/4': 0.75,
# 'half': 0.5,
# '1/4': 0.25
}
def get_rotations():
transformations = {}
for name, angle in ROTATIONS.items():
trans = transforms.Compose([
transforms.ToPILImage(),
RandomAffine(degrees=(angle, angle)),
transforms.ToTensor()
])
transformations[name] = BatchedTransformation(trans)
return transformations
def get_scales():
transformations = {}
for name, scale in SCALES.items():
trans = transforms.Compose([
transforms.ToPILImage(),
RandomAffine(degrees=0, scale=(scale, scale)),
transforms.ToTensor()
])
transformations[name] = BatchedTransformation(trans)
return transformations
def change_background_color(images, old_background, new_background):
"""
:param images: BCHW
:return:
"""
assert old_background == [0]
if not torch.is_tensor(new_background):
new_background = torch.tensor(new_background, dtype=images.dtype)
if images.max() <= 1 and new_background.max() > 1:
new_background /= 255
if images.size(1) == 1 and len(new_background) == 3:
images = images.expand(-1, 3, -1, -1)
else:
assert images.size(1) == len(new_background)
# raise NotImplementedError(images.size(), new_background)
images = images.clone()
new_background = new_background.view(-1, 1, 1)
bg_ratio = images.max() - images
bg = bg_ratio * new_background
imgs = images + bg
# print(images[:, 0, :, :].std().item(),images[:, 1, :, :].std().item(),images[:, 2, :, :].std().item())
# print(imgs[:, 0, :, :].std().item(), imgs[:, 1, :, :].std().item(), imgs[:, 2, :, :].std().item())
return imgs
def get_colors():
transformations = {}
for color in COLORS:
trans = partial(change_background_color, old_background=OLD_BACKGOUND,
new_background=color)
transformations[str(color)] = trans
return transformations
class RainbowTransformationTree(TransformationTree):
def __init__(self, *args, **kwargs):
self.n_rotations = None
self.n_colors = None
        self.n_scales = None
super(RainbowTransformationTree, self).__init__(*args, **kwargs)
def build_tree(self):
self.tree.add_node(self._node_index[self.name], name=self.name)
rotations = get_rotations()
colors = get_colors()
scales = get_scales()
levels = [rotations, scales, colors]
prev_nodes = [self.name]
for domain in levels:
prev_nodes = self._add_transfos(prev_nodes, domain)
self.leaf_nodes.update([self._node_index[node] for node in prev_nodes])
self.depth = len(levels)
return self._node_index[self.name]
def _add_transfos(self, parent_nodes, transfos):
nodes = []
for parent in parent_nodes:
for name, transfo in transfos.items():
node_name = '{}_{}'.format(parent, name)
self.tree.add_node(self._node_index[node_name], name=node_name,
last_transfo=name)
self.tree.add_edge(self._node_index[parent],
self._node_index[node_name],
f=transfo, )
nodes.append(node_name)
return nodes
def transformations_sim(self, t1, t2):
"""
arccos((tr(R)−1)/2)
:param t1:
:param t2:
:return:
"""
t1_nodes = [t1.transfo_pool.tree.nodes()[id]['last_transfo'] for id in
t1.path[1:]]
t2_nodes = [t2.transfo_pool.tree.nodes()[id]['last_transfo'] for id in
t2.path[1:]]
n_eq = 0
for op1, op2 in zip(t1_nodes, t2_nodes):
if op1 == op2:
n_eq += 1
return n_eq / (len(t1_nodes))
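# Illustrative usage sketch (not part of the original file): recolouring the
# background of a small batch of single-channel images with
# `change_background_color`. Pixels at 0 take the new colour, pixels at the
# maximum intensity stay white, and the channel dimension is expanded to RGB.
if __name__ == '__main__':
    batch = torch.zeros(2, 1, 8, 8)
    batch[:, :, 2:6, 2:6] = 1.0  # white square on a black background
    red = change_background_color(batch, OLD_BACKGOUND, [255, 0, 0])
    assert red.shape == (2, 3, 8, 8)
    assert torch.allclose(red[:, 0, 0, 0], torch.tensor(1.0))  # background is now red
    assert torch.allclose(red[:, 1, 0, 0], torch.tensor(0.0))
    assert torch.allclose(red[:, :, 3, 3], torch.tensor(1.0))  # foreground stays white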
| CTrLBenchmark-master | ctrl/transformations/rainbow_transformation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
class TransformationPool(abc.ABC):
@abc.abstractmethod
def get_transformation(self, exclude_trans=None):
raise NotImplementedError
@abc.abstractmethod
def transformations_sim(self, t1, t2):
raise NotImplementedError
| CTrLBenchmark-master | ctrl/transformations/transformation_pool.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from ctrl.transformations.transformation_tree import TransformationTree
from ctrl.transformations.utils import BatchedTransformation
from torchvision import transforms
from torchvision.transforms import RandomRotation
class ImgRotationTransformationTree(TransformationTree):
def __init__(self, n_rotations, max_degrees, *args, **kwargs):
self.n_rotations = n_rotations
self.max_degrees = max_degrees
super(ImgRotationTransformationTree, self).__init__(*args, **kwargs)
def build_tree(self):
self.tree.add_node(self._node_index[self.name], name=self.name)
for i in range(self.n_rotations):
node_name = 'rotate_{}'.format(i)
self.leaf_nodes.add(self._node_index[node_name])
degrees = self.rnd.uniform(-self.max_degrees, self.max_degrees)
trans = transforms.Compose([
transforms.ToPILImage(),
RandomRotation((degrees, degrees)),
transforms.ToTensor()
])
f = BatchedTransformation(trans)
self.tree.add_node(self._node_index[node_name], name=node_name)
self.tree.add_edge(self._node_index[self.name],
self._node_index[node_name],
f=f, degrees=degrees)
self.depth = 1
return self._node_index[self.name]
def transformations_sim(self, t1, t2):
"""
arccos((tr(R)−1)/2)
:param t1:
:param t2:
:return:
"""
theta_1 = self.tree.in_edges()[t1.path[-2:]]['degrees']
theta_2 = self.tree.in_edges()[t2.path[-2:]]['degrees']
theta = abs(theta_1 - theta_2) * np.pi/180
min_angle = np.arccos(np.cos(theta))
return 1 - min_angle / np.pi
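# Illustrative worked example (not part of the original file), reproducing the
# arithmetic of `transformations_sim` above without building a tree: rotations
# of +30 and -30 degrees differ by 60 degrees, giving a similarity of
# 1 - 60/180 = 2/3, while identical angles give a similarity of 1.
if __name__ == '__main__':
    theta = abs(30.0 - (-30.0)) * np.pi / 180
    similarity = 1 - np.arccos(np.cos(theta)) / np.pi
    assert abs(similarity - 2 / 3) < 1e-6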
| CTrLBenchmark-master | ctrl/transformations/img_rotations.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .identity_transformation import IdentityTransformation
from .img_rotations import ImgRotationTransformationTree
from .noisy_nn_transformation import NoisyNNTransformationTree
from .rainbow_transformation import RainbowTransformationTree
from .randperm_transformation import RandomPermutationsTransformation
from .transformation_tree import RandomNNTransformationTree
| CTrLBenchmark-master | ctrl/transformations/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torchvision.transforms.functional as F
from PIL import Image
from ctrl.transformations.transformation_tree import TransformationTree
from ctrl.transformations.utils import BatchedTransformation
from torchvision.transforms import transforms
def load_or_convert_to_image(img):
if isinstance(img, str):
img = Image.open(img).convert('RGB')
elif isinstance(img, torch.Tensor) or isinstance(img, np.ndarray):
img = F.to_pil_image(img)
assert isinstance(img, Image.Image)
return img
def crop_if_not_square(img, max_size=72):
if min(img.size) > max_size:
img = F.resize(img, max_size, Image.BILINEAR)
if img.size[0] != img.size[1]:
img = F.center_crop(img, min(img.size))
return img
class IdentityTransformation(TransformationTree):
def __init__(self, format_image, *args, **kwargs):
self.format_image = format_image
super(IdentityTransformation, self).__init__(*args, **kwargs)
def build_tree(self):
self.tree.add_node(self._node_index[self.name], name=self.name)
node_name = 'Id'
self.leaf_nodes.add(self._node_index[node_name])
self.tree.add_node(self._node_index[node_name], name=node_name)
if self.format_image:
trans = transforms.Compose([
load_or_convert_to_image,
# transforms.ToPILImage(),
crop_if_not_square,
transforms.ToTensor()
])
f = BatchedTransformation(trans)
else:
f = lambda x: x
self.tree.add_edge(self._node_index[self.name],
self._node_index[node_name],
f=f)
self.depth = 1
return self._node_index[self.name]
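# Illustrative usage sketch (not part of the original file): the two helpers
# above normalise an arbitrary input to a square PIL image whose side is at
# most 72 pixels before it is converted back to a tensor.
if __name__ == '__main__':
    img = load_or_convert_to_image(torch.rand(3, 100, 150))
    img = crop_if_not_square(img)
    assert img.size == (72, 72)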
| CTrLBenchmark-master | ctrl/transformations/identity_transformation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class BatchedTransformation(object):
def __init__(self, transfo, descr=None):
self.transfo = transfo
self.descr = descr
def __call__(self, batch):
if torch.is_tensor(batch):
batch = batch.unbind(0)
res = [self.transfo(elt) for elt in batch]
return torch.stack(res, 0)
def __str__(self):
if self.descr is None:
return super().__str__()
else:
            return self.descr
| CTrLBenchmark-master | ctrl/transformations/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
from abc import ABC
from collections import defaultdict
from numbers import Number
import networkx as nx
from torch import nn
from ctrl.commons.tree import Tree
from ctrl.transformations.transformation import Transformation
from ctrl.transformations.transformation_pool import TransformationPool
logger = logging.getLogger(__name__)
class TransformationTree(TransformationPool, Tree, ABC):
def __init__(self, *args, **kwargs):
        # Auto-incrementing index: looking up an unseen node name inserts it
        # with the next integer id (the current length of the dict).
        self._node_index = defaultdict()
        self._node_index.default_factory = self._node_index.__len__
super().__init__(*args, **kwargs)
def get_transformation(self, exclude_trans=None, allowed_trans=None):
if exclude_trans is None:
exclude_trans = []
exclude_nodes = [trans.path[-1] for trans in exclude_trans]
if allowed_trans is not None:
allowed_nodes = set(trans.path[-1] for trans in allowed_trans)
else:
allowed_nodes = None
node = self.get_compatible_nodes(exclude_nodes=exclude_nodes,
force_nodes=allowed_nodes,
leaf_only=True)
all_paths = list(nx.all_simple_paths(self.tree, self.root_node, node))
selected_path = random.choice(all_paths)
path_descr = self.get_path_descr(selected_path)
return Transformation(self, selected_path, path_descr)
def transformations_sim(self, t1, t2):
return self.wu_palmer(t1.path[-1], t2.path[-1])
def edit_transformation(self, transformation, min_dist, max_dist):
dist = random.randint(min_dist, max_dist)
old_path = transformation.path.copy()
old_path = old_path[:-dist]
new_candidates = list(nx.all_simple_paths(self.tree, old_path[-1],
self.out_nodes))
selected_path = random.choice(new_candidates)
new_path = old_path + selected_path[1:]
        return Transformation(self, new_path, self.get_path_descr(new_path))
def get_path_descr(self, path):
return '->'.join([self.tree.nodes[itm]['name'] for itm in path])
class RandomNNTransformationTree(TransformationTree):
def __init__(self, depth, degree, x_dim, z_dim, non_lin, *args, **kwargs):
self.depth = depth
self.n_children = self._format_property(degree)
self.hidden_sizes = self._format_property(x_dim)
self.z_dim = z_dim
if non_lin == 'relu':
self.non_linearity = nn.ReLU
elif non_lin == 'tanh':
self.non_linearity = nn.Tanh
super().__init__(*args, **kwargs)
def _format_property(self, prop):
if isinstance(prop, Number):
prop = [prop]
if len(prop) == 1:
prop = prop * self.depth
assert len(prop) == self.depth
return prop
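    # Illustrative note (not in the original source): with depth=3,
    # _format_property(64) broadcasts the scalar to [64, 64, 64], while a
    # list argument must already contain exactly `depth` entries.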
def build_tree(self):
self._build_tree(self.depth-1, self.n_children, self.name, self.z_dim, self.hidden_sizes)
self.tree.add_node(self._node_index[self.name], name=self.name)
return self._node_index[self.name]
def _build_tree(self, depth, n_children, parent_node, parent_dim, hidden_dims):
for i in range(n_children[0]):
module = nn.Sequential(
nn.Linear(parent_dim, hidden_dims[0]),
self.non_linearity())
node_name = '{}{}'.format(parent_node, i)
self.tree.add_node(self._node_index[node_name], name=node_name)
self.tree.add_edge(self._node_index[parent_node], self._node_index[node_name], f=module)
if depth > 0:
self._build_tree(depth - 1, n_children[1:], node_name, hidden_dims[0], hidden_dims[1:])
else:
self.leaf_nodes.add(self._node_index[node_name])
| CTrLBenchmark-master | ctrl/transformations/transformation_tree.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from ctrl.transformations.transformation_tree import TransformationTree
from ctrl.transformations.utils import BatchedTransformation
from torchvision import transforms
class RandomPermutationsTransformation(TransformationTree):
def __init__(self, n_permutations, x_off, y_off, width, height, flatten,
*args, **kwargs):
self.n_permutations = n_permutations
self.x_off = x_off
self.y_off = y_off
self.width = width
self.height = height
self.flatten = flatten
super(RandomPermutationsTransformation, self).__init__(*args, **kwargs)
def build_tree(self):
self.tree.add_node(self._node_index[self.name], name=self.name)
for i in range(self.n_permutations):
node_name = 'permutation_{}'.format(i)
self.leaf_nodes.add(self._node_index[node_name])
perm = RandomPermutation(self.x_off, self.y_off, self.width,
self.height, self.flatten)
trans = transforms.Compose(
[BatchedTransformation(transforms.Compose([
transforms.ToPILImage(),
transforms.ToTensor()])),
perm])
# f = BatchedTransformation(trans)
self.tree.add_node(self._node_index[node_name], name=node_name)
self.tree.add_edge(self._node_index[self.name],
self._node_index[node_name], f=trans)
self.depth = 1
return self._node_index[self.name]
class RandomPermutation(object):
"""
Applies a constant random permutation to the images.
"""
def __init__(self, x_off=0, y_off=0, width=None, height=None,
flatten=False):
self.x_off = x_off
self.y_off = y_off
self.width = width
self.height = height
self.x_max = x_off + width
self.y_max = y_off + height
self.kernel = torch.randperm(width * height)
self.flatten = flatten
def __call__(self, input):
return rand_perm_(input.clone(), self.x_off, self.y_off, self.x_max,
self.y_max, self.kernel, self.flatten)
def rand_perm_(img, x, y, x_max, y_max, kernel, flatten):
"""
Applies INPLACE the random permutation defined in `kernel` to the image
`img` on the zone defined by `x`, `y`, `x_max`, `y_max`
:param img: Input image of dimension (B*C*W*H)
:param x: offset on x axis
:param y: offset on y axis
:param x_max: end of the zone to permute on the x axis
:param y_max: end of the zone to permute on the y axis
    :param kernel: LongTensor of dim 1 containing one value for each point in
        the zone to permute
    :param flatten: if True, the result is flattened to shape (B, C*W*H)
    :return: the permuted image.
"""
assert img.dim() == 4
if img.size(1) != 1:
raise NotImplementedError('Not Implemented for multi-channel images')
zone = img[:, :, x:x_max, y:y_max].contiguous()
img[:, :, x:x_max, y:y_max] = zone.view(zone.size(0), -1)\
.index_select(1, kernel).view(zone.size())
return img.view(img.size(0), -1) if flatten else img
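# Illustrative usage sketch (not part of the original file): applying a fixed
# random permutation to the central 6x6 patch of a batch of 1-channel 10x10
# images. The same kernel is reused for every image and pixels outside the
# patch are left untouched.
if __name__ == '__main__':
    perm = RandomPermutation(x_off=2, y_off=2, width=6, height=6)
    batch = torch.arange(100, dtype=torch.float).view(1, 1, 10, 10).repeat(2, 1, 1, 1)
    out = perm(batch)
    assert out.shape == batch.shape
    assert torch.equal(out[:, :, 0, :], batch[:, :, 0, :])  # border row untouched
    assert torch.equal(out[0], out[1])  # identical permutation for both images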
| CTrLBenchmark-master | ctrl/transformations/randperm_transformation.py |