index | package | name | docstring | code | signature |
---|---|---|---|---|---|
42,721 |
pydle.client
|
__init__
| null |
def __init__(self, channel):
super().__init__('Not in channel: {}'.format(channel))
self.channel = channel
|
(self, channel)
|
42,724 |
asyncio.coroutines
|
coroutine
|
Decorator to mark coroutines.
If the coroutine is not yielded from before it is destroyed,
an error message is logged.
|
def coroutine(func):
"""Decorator to mark coroutines.
If the coroutine is not yielded from before it is destroyed,
an error message is logged.
"""
warnings.warn('"@coroutine" decorator is deprecated since Python 3.8, use "async def" instead',
DeprecationWarning,
stacklevel=2)
if inspect.iscoroutinefunction(func):
# In Python 3.5 that's all we need to do for coroutines
# defined with "async def".
return func
if inspect.isgeneratorfunction(func):
coro = func
else:
@functools.wraps(func)
def coro(*args, **kw):
res = func(*args, **kw)
if (base_futures.isfuture(res) or inspect.isgenerator(res) or
isinstance(res, CoroWrapper)):
res = yield from res
else:
# If 'res' is an awaitable, run it.
try:
await_meth = res.__await__
except AttributeError:
pass
else:
if isinstance(res, collections.abc.Awaitable):
res = yield from await_meth()
return res
coro = types.coroutine(coro)
if not _DEBUG:
wrapper = coro
else:
@functools.wraps(func)
def wrapper(*args, **kwds):
w = CoroWrapper(coro(*args, **kwds), func=func)
if w._source_traceback:
del w._source_traceback[-1]
# Python < 3.5 does not implement __qualname__
# on generator objects, so we set it manually.
# We use getattr as some callables (such as
# functools.partial may lack __qualname__).
w.__name__ = getattr(func, '__name__', None)
w.__qualname__ = getattr(func, '__qualname__', None)
return w
wrapper._is_coroutine = _is_coroutine # For iscoroutinefunction().
return wrapper
|
(func)
|
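A brief usage sketch of the record above (hypothetical, for Python 3.8–3.10 where the decorator still exists): applying the deprecated decorator to a generator-based coroutine emits a `DeprecationWarning`, and `async def` is the modern replacement.

```python
import asyncio

# Deprecated style (removed in Python 3.11); triggers the DeprecationWarning
# shown in the decorator above.
@asyncio.coroutine
def old_style():
    yield from asyncio.sleep(0)

# Modern equivalent.
async def new_style():
    await asyncio.sleep(0)
```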
42,726 |
pydle
|
featurize
|
Put features into proper MRO order.
|
def featurize(*features):
""" Put features into proper MRO order. """
def compare_subclass(left, right):
if issubclass(left, right):
return -1
if issubclass(right, left):
return 1
return 0
sorted_features = sorted(features, key=cmp_to_key(compare_subclass))
name = 'FeaturizedClient[{features}]'.format(
features=', '.join(feature.__name__ for feature in sorted_features))
return type(name, tuple(sorted_features), {})
|
(*features)
|
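A minimal illustrative sketch with toy classes (not actual pydle features): `featurize` sorts subclasses ahead of their parents, so the generated type has a consistent MRO regardless of argument order.

```python
from pydle import featurize

class Base:
    pass

class Extended(Base):
    pass

# Extended is ordered before Base, whichever order the arguments arrive in.
Combined = featurize(Base, Extended)
assert Combined.__mro__.index(Extended) < Combined.__mro__.index(Base)
```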
42,734 |
pyhmmer.hmmer
|
hmmalign
|
Align several sequences to a reference HMM, and return the MSA.
Arguments:
hmm (`~pyhmmer.plan7.HMM`): The reference HMM to use for the
alignment.
sequences (iterable of `~pyhmmer.easel.DigitalSequence`): The
sequences to align to the HMM. If you plan on using the
same sequences several times, consider storing them into
a `~pyhmmer.easel.DigitalSequenceBlock` directly.
trim (`bool`): Trim off any residues that get assigned to
flanking :math:`N` and :math:`C` states (in profile traces)
or :math:`I_0` and :math:`I_m` (in core traces).
digitize (`bool`): If set to `True`, returns a `DigitalMSA`
instead of a `TextMSA`.
all_consensus_cols (`bool`): Force a column to be created for
every consensus column in the model, even if it means having
all gap characters in a column.
Returns:
`~pyhmmer.easel.MSA`: A multiple sequence alignment containing
the aligned sequences, either a `TextMSA` or a `DigitalMSA`
depending on the value of the ``digitize`` argument.
See Also:
The `~pyhmmer.plan7.TraceAligner` class, which lets you inspect the
intermediate tracebacks obtained for each alignment before building
a MSA.
.. versionadded:: 0.4.7
|
def hmmalign(
hmm: HMM,
sequences: typing.Iterable[DigitalSequence],
*,
digitize: bool = False,
trim: bool = False,
all_consensus_cols: bool = True,
) -> MSA:
"""Align several sequences to a reference HMM, and return the MSA.
Arguments:
hmm (`~pyhmmer.plan7.HMM`): The reference HMM to use for the
alignment.
sequences (iterable of `~pyhmmer.easel.DigitalSequence`): The
sequences to align to the HMM. If you plan on using the
same sequences several times, consider storing them into
a `~pyhmmer.easel.DigitalSequenceBlock` directly.
trim (`bool`): Trim off any residues that get assigned to
flanking :math:`N` and :math:`C` states (in profile traces)
or :math:`I_0` and :math:`I_m` (in core traces).
digitize (`bool`): If set to `True`, returns a `DigitalMSA`
instead of a `TextMSA`.
all_consensus_cols (`bool`): Force a column to be created for
every consensus column in the model, even if it means having
all gap characters in a column.
Returns:
`~pyhmmer.easel.MSA`: A multiple sequence alignment containing
the aligned sequences, either a `TextMSA` or a `DigitalMSA`
depending on the value of the ``digitize`` argument.
See Also:
The `~pyhmmer.plan7.TraceAligner` class, which lets you inspect the
intermediate tracebacks obtained for each alignment before building
a MSA.
.. versionadded:: 0.4.7
"""
aligner = TraceAligner()
if not isinstance(sequences, DigitalSequenceBlock):
sequences = DigitalSequenceBlock(hmm.alphabet, sequences)
traces = aligner.compute_traces(hmm, sequences)
return aligner.align_traces(
hmm,
sequences,
traces,
trim=trim,
digitize=digitize,
all_consensus_cols=all_consensus_cols,
)
|
(hmm: pyhmmer.plan7.HMM, sequences: Iterable[pyhmmer.easel.DigitalSequence], *, digitize: bool = False, trim: bool = False, all_consensus_cols: bool = True) -> pyhmmer.easel.MSA
|
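A usage sketch with placeholder file names (one HMM per file assumed): load the reference HMM, read the sequences in digital mode with the same alphabet, then align.

```python
import pyhmmer

with pyhmmer.plan7.HMMFile("reference.hmm") as hmm_file:
    hmm = hmm_file.read()

with pyhmmer.easel.SequenceFile("sequences.fasta", digital=True, alphabet=hmm.alphabet) as seq_file:
    sequences = seq_file.read_block()

# Returns a TextMSA by default; pass digitize=True for a DigitalMSA.
msa = pyhmmer.hmmer.hmmalign(hmm, sequences, trim=True)
```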
42,736 |
pyhmmer.hmmer
|
hmmpress
|
Press several HMMs into a database.
Calling this function will create 4 files at the given location:
``{output}.h3p`` (containing the optimized profiles),
``{output}.h3m`` (containing the binary HMMs),
``{output}.h3f`` (containing the MSV parameters), and
``{output}.h3i`` (the SSI index mapping the previous files).
Arguments:
hmms (iterable of `~pyhmmer.plan7.HMM`): The HMMs to be pressed
together in the file.
output (`str` or `os.PathLike`): The path to an output location
where the different files will be written.
|
def hmmpress(
hmms: typing.Iterable[HMM],
output: typing.Union[str, "os.PathLike[str]"],
) -> int:
"""Press several HMMs into a database.
Calling this function will create 4 files at the given location:
``{output}.h3p`` (containing the optimized profiles),
``{output}.h3m`` (containing the binary HMMs),
``{output}.h3f`` (containing the MSV parameters), and
``{output}.h3i`` (the SSI index mapping the previous files).
Arguments:
hmms (iterable of `~pyhmmer.plan7.HMM`): The HMMs to be pressed
together in the file.
output (`str` or `os.PathLike`): The path to an output location
where the different files will be written.
"""
DEFAULT_L = 400
path = os.fspath(output)
nmodel = 0
with contextlib.ExitStack() as ctx:
h3p = ctx.enter_context(open("{}.h3p".format(path), "wb"))
h3m = ctx.enter_context(open("{}.h3m".format(path), "wb"))
h3f = ctx.enter_context(open("{}.h3f".format(path), "wb"))
h3i = ctx.enter_context(SSIWriter("{}.h3i".format(path)))
fh = h3i.add_file(path, format=0)
for hmm in hmms:
# create the background model on the first iteration
if nmodel == 0:
bg = Background(hmm.alphabet)
bg.L = DEFAULT_L
# build the optimized models
gm = Profile(hmm.M, hmm.alphabet)
gm.configure(hmm, bg, DEFAULT_L)
om = gm.to_optimized()
# update the disk offsets of the optimized model to be written
om.offsets.model = h3m.tell()
om.offsets.profile = h3p.tell()
om.offsets.filter = h3f.tell()
# check that hmm has a name
if hmm.name is None:
raise ValueError("HMMs must have a name to be pressed.")
# add the HMM name, and optionally the HMM accession to the index
h3i.add_key(hmm.name, fh, om.offsets.model, 0, 0)
if hmm.accession is not None:
h3i.add_alias(hmm.accession, hmm.name)
# write the HMM in binary format, and the optimized profile
hmm.write(h3m, binary=True)
om.write(h3f, h3p)
nmodel += 1
# return the number of written HMMs
return nmodel
|
(hmms: Iterable[pyhmmer.plan7.HMM], output: Union[str, os.PathLike[str]]) -> int
|
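A usage sketch, assuming ``Pfam-A.hmm`` is a local HMM flat file: iterating over the open `HMMFile` feeds every model to `hmmpress`, which writes the four companion files next to the given prefix.

```python
import pyhmmer

with pyhmmer.plan7.HMMFile("Pfam-A.hmm") as hmm_file:
    # Writes Pfam-A.hmm.h3p, .h3m, .h3f and .h3i next to the input file.
    n = pyhmmer.hmmer.hmmpress(hmm_file, "Pfam-A.hmm")

print(f"pressed {n} models")
```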
42,737 |
pyhmmer.hmmer
|
hmmscan
|
Scan query sequences against a profile database.
In HMMER many-to-many comparisons, a *scan* is the operation of querying
with sequences a database of profile HMMs. It is necessarily slower than
a *search* because reconfiguring profiles between each query has
additional overhead, so it's recommended to use a *search* if the order
of the comparisons is not important.
The `hmmscan` function offers two ways of managing the database that will
be selected based on the type of the ``profiles`` argument. If
``profiles`` is an `HMMPressedFile` object, `hmmscan` will reopen the
file in each thread, and load profiles *iteratively* to scan with the
query. Otherwise, it will *pre-fetch* the optimized profiles into an
`OptimizedProfileBlock` collection, and share them across queries. The
*pre-fetching* gives much higher performance at the cost of extra
startup time and much higher memory consumption. You may want to check
how much memory is available (for instance with `psutil.virtual_memory`)
before trying to load a whole pHMM database.
Arguments:
queries (iterable of `DigitalSequence`): The query sequences to scan
with the database. Passing a single query is supported.
profiles (iterable of `HMM`, `Profile` or `OptimizedProfile`): A
database of profiles to query. If you plan on using the
same targets several times, consider converting them into
`OptimizedProfile` and storing them into an `OptimizedProfileBlock`
ahead of time. If a `HMMPressedFile` is given, profiles will be
loaded iteratively from disk rather than prefetched.
cpus (`int`): The number of threads to run in parallel. Pass ``1``
to run everything in the main thread, ``0`` to automatically
select a suitable number (using `psutil.cpu_count`), or any
positive number otherwise.
callback (callable): A callback that is called every time a query is
processed with two arguments: the query, and the total number
of queries. This can be used to display progress in UI.
background (`pyhmmer.plan7.Background`, *optional*): A background
object to use for configuring the profiles. If `None` is given,
a default one is created.
Yields:
`~pyhmmer.plan7.TopHits`: An object reporting *top hits* for each
query, in the same order the queries were passed in the input.
Raises:
`~pyhmmer.errors.AlphabetMismatch`: When any of the query sequences
and the profiles do not share the same alphabet.
Note:
Any additional keyword arguments passed to the `hmmscan` function
will be passed transparently to the `~pyhmmer.plan7.Pipeline` to
be created in each worker thread.
Hint:
If reading the profiles from a pressed HMM database, make sure to
use the `HMMFile.optimized_profiles` method so that profiles are
read iteratively from the file during the scan loop::
>>> with HMMFile("tests/data/hmms/db/PF02826.hmm") as hmm_file:
... targets = hmm_file.optimized_profiles()
... all_hits = list(hmmscan(proteins, targets, E=1e-10))
>>> sum(len(hits) for hits in all_hits)
6
Otherwise, passing ``hmm_file`` as the ``profiles`` argument of
`hmmscan` would cause the entire HMM file to be loaded in memory
into an `OptimizedProfileBlock`.
.. versionadded:: 0.7.0
|
def hmmscan(
queries: typing.Union[DigitalSequence, typing.Iterable[DigitalSequence]],
profiles: typing.Iterable[typing.Union[HMM, Profile, OptimizedProfile]],
*,
cpus: int = 0,
callback: typing.Optional[typing.Callable[[DigitalSequence, int], None]] = None,
background: typing.Optional[Background] = None,
**options, # type: typing.Dict[str, object]
) -> typing.Iterator[TopHits]:
"""Scan query sequences against a profile database.
In HMMER many-to-many comparisons, a *scan* is the operation of querying
with sequences a database of profile HMMs. It is necessarily slower than
a *search* because reconfiguring profiles between each query has
additional overhead, so it's recommended to use a *search* if the order
of the comparisons is not important.
The `hmmscan` function offers two ways of managing the database that will
be selected based on the type of the ``profiles`` argument. If
``profiles`` is an `HMMPressedFile` object, `hmmscan` will reopen the
file in each thread, and load profiles *iteratively* to scan with the
query. Otherwise, it will *pre-fetch* the optimized profiles into an
`OptimizedProfileBlock` collection, and share them across queries. The
*pre-fetching* gives much higher performance at the cost of extra
startup time and much higher memory consumption. You may want to check
how much memory is available (for instance with `psutil.virtual_memory`)
before trying to load a whole pHMM database.
Arguments:
queries (iterable of `DigitalSequence`): The query sequences to scan
with the database. Passing a single query is supported.
profiles (iterable of `HMM`, `Profile` or `OptimizedProfile`): A
database of profiles to query. If you plan on using the
same targets several times, consider converting them into
`OptimizedProfile` and storing them into an `OptimizedProfileBlock`
ahead of time. If a `HMMPressedFile` is given, profiles will be
loaded iteratively from disk rather than prefetched.
cpus (`int`): The number of threads to run in parallel. Pass ``1``
to run everything in the main thread, ``0`` to automatically
select a suitable number (using `psutil.cpu_count`), or any
positive number otherwise.
callback (callable): A callback that is called every time a query is
processed with two arguments: the query, and the total number
of queries. This can be used to display progress in UI.
background (`pyhmmer.plan7.Background`, *optional*): A background
object to use for configuring the profiles. If `None` is given,
a default one is created.
Yields:
`~pyhmmer.plan7.TopHits`: An object reporting *top hits* for each
query, in the same order the queries were passed in the input.
Raises:
`~pyhmmer.errors.AlphabetMismatch`: When any of the query sequences
and the profiles do not share the same alphabet.
Note:
Any additional keyword arguments passed to the `hmmscan` function
will be passed transparently to the `~pyhmmer.plan7.Pipeline` to
be created in each worker thread.
Hint:
If reading the profiles from a pressed HMM database, make sure to
use the `HMMFile.optimized_profiles` method so that profiles are
read iteratively from the file during the scan loop::
>>> with HMMFile("tests/data/hmms/db/PF02826.hmm") as hmm_file:
... targets = hmm_file.optimized_profiles()
... all_hits = list(hmmscan(proteins, targets, E=1e-10))
>>> sum(len(hits) for hits in all_hits)
6
Otherwise, passing ``hmm_file`` as the ``profiles`` argument of
`hmmscan` would cause the entire HMM file to be loaded in memory
into an `OptimizedProfileBlock`.
.. versionadded:: 0.7.0
"""
_alphabet = Alphabet.amino()
_cpus = cpus if cpus > 0 else psutil.cpu_count(logical=False) or os.cpu_count() or 1
_background = Background(_alphabet) if background is None else background
options.setdefault("background", _background) # type: ignore
if not isinstance(queries, collections.abc.Iterable):
queries = (queries,)
if isinstance(profiles, HMMPressedFile):
alphabet = _alphabet # FIXME: try to detect from content instead?
targets = profiles
elif isinstance(profiles, OptimizedProfileBlock):
alphabet = profiles.alphabet
targets = profiles # type: ignore
else:
alphabet = _alphabet
block = OptimizedProfileBlock(_alphabet)
for item in profiles:
if isinstance(item, HMM):
profile = Profile(item.M, item.alphabet)
profile.configure(item, _background)
item = profile
if isinstance(item, Profile):
item = item.to_optimized()
if isinstance(item, OptimizedProfile):
block.append(item)
else:
ty = type(item).__name__
raise TypeError(
"Expected HMM, Profile or OptimizedProfile, found {}".format(ty)
)
targets = block # type: ignore
dispatcher = _SCANDispatcher(
queries=queries,
targets=targets,
cpus=_cpus,
callback=callback,
pipeline_class=Pipeline,
alphabet=alphabet,
builder=None,
**options,
)
return dispatcher.run()
|
(queries: Union[pyhmmer.easel.DigitalSequence, Iterable[pyhmmer.easel.DigitalSequence]], profiles: Iterable[Union[pyhmmer.plan7.HMM, pyhmmer.plan7.Profile, pyhmmer.plan7.OptimizedProfile]], *, cpus: int = 0, callback: Optional[Callable[[pyhmmer.easel.DigitalSequence, int], NoneType]] = None, background: Optional[pyhmmer.plan7.Background] = None, **options) -> Iterator[pyhmmer.plan7.TopHits]
|
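A usage sketch following the Hint above, with placeholder paths and assuming the database was pressed beforehand with `hmmpress`, so that the profiles can be streamed from disk.

```python
import pyhmmer

with pyhmmer.easel.SequenceFile("proteins.fasta", digital=True) as seq_file:
    proteins = seq_file.read_block()

with pyhmmer.plan7.HMMFile("Pfam-A.hmm") as hmm_file:
    # optimized_profiles() requires the pressed .h3m/.h3f/.h3p/.h3i files.
    targets = hmm_file.optimized_profiles()
    for hits in pyhmmer.hmmer.hmmscan(proteins, targets, cpus=4, E=1e-10):
        for hit in hits:
            print(hit.name.decode(), hit.evalue)
```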
42,738 |
pyhmmer.hmmer
|
hmmsearch
|
Search HMM profiles against a sequence database.
In HMMER many-to-many comparisons, a *search* is the operation of
querying with profile HMMs a database of sequences.
The `hmmsearch` function offers two ways of managing the database that
will be selected based on the type of the ``sequences`` argument. If
``sequences`` is a `SequenceFile` object, `hmmsearch` will reopen the
file in each thread, and load targets *iteratively* to scan with the
query. Otherwise, it will *pre-fetch* the target sequences into a
`DigitalSequenceBlock` collection, and share them across threads
without copy. The *pre-fetching* gives much higher performance at the
cost of extra startup time and much higher memory consumption. You may
want to check how much memory is available (for instance with
`psutil.virtual_memory`) before trying to load a whole sequence database,
but pre-fetching is recommended whenever possible.
Arguments:
queries (iterable of `HMM`, `Profile` or `OptimizedProfile`): The
query HMMs or profiles to search for in the database. Note that
passing a single object is supported.
sequences (iterable of `~pyhmmer.easel.DigitalSequence`): A
database of sequences to query. If you plan on using the
same sequences several times, consider storing them into
a `~pyhmmer.easel.DigitalSequenceBlock` directly. If a
`SequenceFile` is given, profiles will be loaded iteratively
from disk rather than prefetched.
cpus (`int`): The number of threads to run in parallel. Pass ``1``
to run everything in the main thread, ``0`` to automatically
select a suitable number (using `psutil.cpu_count`), or any
positive number otherwise.
callback (callable): A callback that is called every time a query is
processed with two arguments: the query, and the total number
of queries. This can be used to display progress in UI.
Yields:
`~pyhmmer.plan7.TopHits`: An object reporting *top hits* for each
query, in the same order the queries were passed in the input.
Raises:
`~pyhmmer.errors.AlphabetMismatch`: When any of the query HMMs
and the sequences do not share the same alphabet.
Note:
Any additional arguments passed to the `hmmsearch` function will be
passed transparently to the `~pyhmmer.plan7.Pipeline` to be created.
For instance, to run a ``hmmsearch`` using a bitscore cutoff of
5 instead of the default E-value cutoff, use::
>>> hits = next(hmmsearch(thioesterase, proteins, T=5))
>>> hits[0].score
8.601...
.. versionadded:: 0.1.0
.. versionchanged:: 0.4.9
Allow using `Profile` and `OptimizedProfile` queries.
.. versionchanged:: 0.7.0
Queries may now be an iterable of different types, or a single object.
|
def hmmsearch(
queries: typing.Union[_SEARCHQueryType, typing.Iterable[_SEARCHQueryType]],
sequences: typing.Iterable[DigitalSequence],
*,
cpus: int = 0,
callback: typing.Optional[typing.Callable[[_SEARCHQueryType, int], None]] = None,
**options, # type: typing.Dict[str, object]
) -> typing.Iterator[TopHits]:
"""Search HMM profiles against a sequence database.
In HMMER many-to-many comparisons, a *search* is the operation of
querying with profile HMMs a database of sequences.
The `hmmsearch` function offers two ways of managing the database that
will be selected based on the type of the ``sequences`` argument. If
``sequences`` is a `SequenceFile` object, `hmmsearch` will reopen the
file in each thread, and load targets *iteratively* to scan with the
query. Otherwise, it will *pre-fetch* the target sequences into a
`DigitalSequenceBlock` collection, and share them across threads
without copy. The *pre-fetching* gives much higher performance at the
cost of extra startup time and much higher memory consumption. You may
want to check how much memory is available (for instance with
`psutil.virtual_memory`) before trying to load a whole sequence database,
but pre-fetching is recommended whenever possible.
Arguments:
queries (iterable of `HMM`, `Profile` or `OptimizedProfile`): The
query HMMs or profiles to search for in the database. Note that
passing a single object is supported.
sequences (iterable of `~pyhmmer.easel.DigitalSequence`): A
database of sequences to query. If you plan on using the
same sequences several times, consider storing them into
a `~pyhmmer.easel.DigitalSequenceBlock` directly. If a
`SequenceFile` is given, profiles will be loaded iteratively
from disk rather than prefetched.
cpus (`int`): The number of threads to run in parallel. Pass ``1``
to run everything in the main thread, ``0`` to automatically
select a suitable number (using `psutil.cpu_count`), or any
positive number otherwise.
callback (callable): A callback that is called every time a query is
processed with two arguments: the query, and the total number
of queries. This can be used to display progress in UI.
Yields:
`~pyhmmer.plan7.TopHits`: An object reporting *top hits* for each
query, in the same order the queries were passed in the input.
Raises:
`~pyhmmer.errors.AlphabetMismatch`: When any of the query HMMs
and the sequences do not share the same alphabet.
Note:
Any additional arguments passed to the `hmmsearch` function will be
passed transparently to the `~pyhmmer.plan7.Pipeline` to be created.
For instance, to run a ``hmmsearch`` using a bitscore cutoff of
5 instead of the default E-value cutoff, use::
>>> hits = next(hmmsearch(thioesterase, proteins, T=5))
>>> hits[0].score
8.601...
.. versionadded:: 0.1.0
.. versionchanged:: 0.4.9
Allow using `Profile` and `OptimizedProfile` queries.
.. versionchanged:: 0.7.0
Queries may now be an iterable of different types, or a single object.
"""
_cpus = cpus if cpus > 0 else psutil.cpu_count(logical=False) or os.cpu_count() or 1
if not isinstance(queries, collections.abc.Iterable):
queries = (queries,)
if isinstance(sequences, SequenceFile):
sequence_file: SequenceFile = sequences
if sequence_file.name is None:
raise ValueError("expected named `SequenceFile` for targets")
if not sequence_file.digital:
raise ValueError("expected digital mode `SequenceFile` for targets")
assert sequence_file.alphabet is not None
alphabet = sequence_file.alphabet
targets: typing.Union[SequenceFile, DigitalSequenceBlock] = sequence_file
elif isinstance(sequences, DigitalSequenceBlock):
alphabet = sequences.alphabet
targets = sequences
else:
queries = peekable(queries)
try:
alphabet = queries.peek().alphabet
targets = DigitalSequenceBlock(alphabet, sequences)
except StopIteration:
alphabet = Alphabet.amino()
targets = DigitalSequenceBlock(alphabet)
dispatcher = _SEARCHDispatcher(
queries=queries,
targets=targets,
cpus=_cpus,
callback=callback,
alphabet=alphabet,
builder=None,
pipeline_class=Pipeline,
**options,
)
return dispatcher.run()
|
(queries: Union[pyhmmer.plan7.HMM, pyhmmer.plan7.Profile, pyhmmer.plan7.OptimizedProfile, Iterable[Union[pyhmmer.plan7.HMM, pyhmmer.plan7.Profile, pyhmmer.plan7.OptimizedProfile]]], sequences: Iterable[pyhmmer.easel.DigitalSequence], *, cpus: int = 0, callback: Optional[Callable[[Union[pyhmmer.plan7.HMM, pyhmmer.plan7.Profile, pyhmmer.plan7.OptimizedProfile], int], NoneType]] = None, **options) -> Iterator[pyhmmer.plan7.TopHits]
|
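A usage sketch with placeholder paths: open the target database as a named, digital-mode `SequenceFile` so targets are read iteratively, and keep the hits of a single query HMM below a fixed E-value.

```python
import pyhmmer

with pyhmmer.plan7.HMMFile("query.hmm") as hmm_file:
    hmm = hmm_file.read()

with pyhmmer.easel.SequenceFile("proteins.fasta", digital=True, alphabet=hmm.alphabet) as seq_file:
    hits = next(pyhmmer.hmmer.hmmsearch(hmm, seq_file, E=1e-5))

for hit in hits:
    print(hit.name.decode(), hit.score, hit.evalue)
```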
42,739 |
pyhmmer.hmmer
|
jackhmmer
|
Search protein sequences against a sequence database.
Arguments:
queries (iterable of `DigitalSequence`): The query sequences to search
for in the sequence database. Passing a single sequence object
is supported.
sequences (iterable of `~pyhmmer.easel.DigitalSequence`): A database
of sequences to query. If you plan on using the same sequences
several times, consider storing them into a
`~pyhmmer.easel.DigitalSequenceBlock` directly. `jackhmmer` does
not support passing a `~pyhmmer.easel.SequenceFile` at the
moment.
max_iterations (`int`): The maximum number of iterations for the
search. Hits will be returned early if the search converged.
select_hits (callable, optional): A function or callable object
for manually selecting hits during each iteration. It should
take a single `~pyhmmer.plan7.TopHits` argument and change the
inclusion of individual hits with the `~pyhmmer.plan7.Hit.include`
and `~pyhmmer.plan7.Hit.drop` methods of `~pyhmmer.plan7.Hit`
objects.
checkpoints (`bool`): A logical flag to return the results at each
iteration 'checkpoint'. If `True`, then an iterable of up to
``max_iterations`` `~pyhmmer.plan7.IterationResult` will be
returned, rather than just the final iteration. This is similar
to the ``--chkhmm`` and ``--chkali`` flags from HMMER3's
``jackhmmer`` interface.
cpus (`int`): The number of threads to run in parallel. Pass ``1`` to
run everything in the main thread, ``0`` to automatically
select a suitable number (using `psutil.cpu_count`), or any
positive number otherwise.
callback (callable): A callback that is called every time a query is
processed with two arguments: the query, and the total number
of queries. This can be used to display progress in UI.
builder (`~pyhmmer.plan7.Builder`, optional): A builder to configure
how the queries are converted to HMMs. Passing `None` will create
a default instance.
Yields:
`~pyhmmer.plan7.IterationResult`: An *iteration result* instance for
each query, in the same order the queries were passed in the input.
If the ``checkpoints`` option is `True`, all iterations will be returned
instead of the last one.
Raises:
`~pyhmmer.errors.AlphabetMismatch`: When any of the query sequences,
the profile, or the optional builder do not share the same
alphabet.
Note:
Any additional keyword arguments passed to the `jackhmmer` function
will be passed transparently to the `~pyhmmer.plan7.Pipeline` to
be created in each worker thread.
Caution:
Default values used for ``jackhmmer`` do not correspond to the
default parameters used for creating a pipeline in the other cases.
If no parameter value is given as a keyword argument, `jackhmmer`
will create the pipeline with ``incE=0.001`` and ``incdomE=0.001``,
where a default `~pyhmmer.plan7.Pipeline` would use ``incE=0.01``
and ``incdomE=0.01``.
.. versionadded:: 0.8.0
|
def jackhmmer(
queries: typing.Union[_JACKHMMERQueryType, typing.Iterable[_JACKHMMERQueryType]],
sequences: typing.Iterable[DigitalSequence],
*,
max_iterations: typing.Optional[int] = 5,
select_hits: typing.Optional[typing.Callable[[TopHits], None]] = None,
checkpoints: bool = False,
cpus: int = 0,
callback: typing.Optional[typing.Callable[[_JACKHMMERQueryType, int], None]] = None,
builder: typing.Optional[Builder] = None,
**options, # type: object
) -> typing.Union[
typing.Iterator[IterationResult], typing.Iterator[typing.Iterable[IterationResult]]
]:
"""Search protein sequences against a sequence database.
Arguments:
queries (iterable of `DigitalSequence`): The query sequences to search
for in the sequence database. Passing a single sequence object
is supported.
sequences (iterable of `~pyhmmer.easel.DigitalSequence`): A database
of sequences to query. If you plan on using the same sequences
several times, consider storing them into a
`~pyhmmer.easel.DigitalSequenceBlock` directly. `jackhmmer` does
not support passing a `~pyhmmer.easel.SequenceFile` at the
moment.
max_iterations (`int`): The maximum number of iterations for the
search. Hits will be returned early if the search converged.
select_hits (callable, optional): A function or callable object
for manually selecting hits during each iteration. It should
take a single `~pyhmmer.plan7.TopHits` argument and change the
inclusion of individual hits with the `~pyhmmer.plan7.Hit.include`
and `~pyhmmer.plan7.Hit.drop` methods of `~pyhmmer.plan7.Hit`
objects.
checkpoints (`bool`): A logical flag to return the results at each
iteration 'checkpoint'. If `True`, then an iterable of up to
``max_iterations`` `~pyhmmer.plan7.IterationResult` will be
returned, rather than just the final iteration. This is similar
to the ``--chkhmm`` and ``--chkali`` flags from HMMER3's
``jackhmmer`` interface.
cpus (`int`): The number of threads to run in parallel. Pass ``1`` to
run everything in the main thread, ``0`` to automatically
select a suitable number (using `psutil.cpu_count`), or any
positive number otherwise.
callback (callable): A callback that is called every time a query is
processed with two arguments: the query, and the total number
of queries. This can be used to display progress in UI.
builder (`~pyhmmer.plan7.Builder`, optional): A builder to configure
how the queries are converted to HMMs. Passing `None` will create
a default instance.
Yields:
`~pyhmmer.plan7.IterationResult`: An *iteration result* instance for
each query, in the same order the queries were passed in the input.
If the ``checkpoints`` option is `True`, all iterations will be returned
instead of the last one.
Raises:
`~pyhmmer.errors.AlphabetMismatch`: When any of the query sequences,
the profile, or the optional builder do not share the same
alphabet.
Note:
Any additional keyword arguments passed to the `jackhmmer` function
will be passed transparently to the `~pyhmmer.plan7.Pipeline` to
be created in each worker thread.
Caution:
Default values used for ``jackhmmer`` do not correspond to the
default parameters used for creating a pipeline in the other cases.
If no parameter value is given as a keyword argument, `jackhmmer`
will create the pipeline with ``incE=0.001`` and ``incdomE=0.001``,
where a default `~pyhmmer.plan7.Pipeline` would use ``incE=0.01``
and ``incdomE=0.01``.
.. versionadded:: 0.8.0
"""
_alphabet = Alphabet.amino()
_cpus = cpus if cpus > 0 else psutil.cpu_count(logical=False) or os.cpu_count() or 1
_builder = Builder(_alphabet, architecture="hand") if builder is None else builder
options.setdefault("incE", 0.001)
options.setdefault("incdomE", 0.001)
if not isinstance(queries, collections.abc.Iterable):
queries = (queries,)
if isinstance(sequences, SequenceFile):
sequence_file: SequenceFile = sequences
if sequence_file.name is None:
raise ValueError("expected named `SequenceFile` for targets")
if not sequence_file.digital:
raise ValueError("expected digital mode `SequenceFile` for targets")
assert sequence_file.alphabet is not None
alphabet = sequence_file.alphabet
targets = typing.cast(DigitalSequenceBlock, sequence_file.read_block())
elif isinstance(sequences, DigitalSequenceBlock):
alphabet = sequences.alphabet
targets = sequences
else:
alphabet = _alphabet
targets = DigitalSequenceBlock(_alphabet, sequences)
dispatcher = _JACKHMMERDispatcher( # type: ignore
queries=queries,
targets=targets,
cpus=_cpus,
callback=callback,
pipeline_class=Pipeline,
alphabet=alphabet,
builder=_builder,
max_iterations=max_iterations,
select_hits=select_hits,
checkpoints=checkpoints,
**options,
)
return dispatcher.run()
|
(queries: Union[pyhmmer.easel.DigitalSequence, pyhmmer.plan7.HMM, pyhmmer.plan7.Profile, pyhmmer.plan7.OptimizedProfile, Iterable[Union[pyhmmer.easel.DigitalSequence, pyhmmer.plan7.HMM, pyhmmer.plan7.Profile, pyhmmer.plan7.OptimizedProfile]]], sequences: Iterable[pyhmmer.easel.DigitalSequence], *, max_iterations: Optional[int] = 5, select_hits: Optional[Callable[[pyhmmer.plan7.TopHits], NoneType]] = None, checkpoints: bool = False, cpus: int = 0, callback: Optional[Callable[[Union[pyhmmer.easel.DigitalSequence, pyhmmer.plan7.HMM, pyhmmer.plan7.Profile, pyhmmer.plan7.OptimizedProfile], int], NoneType]] = None, builder: Optional[pyhmmer.plan7.Builder] = None, **options) -> Union[Iterator[pyhmmer.plan7.IterationResult], Iterator[Iterable[pyhmmer.plan7.IterationResult]]]
|
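A usage sketch with placeholder paths: run an iterative search with a single query sequence against an in-memory target block and inspect the final iteration.

```python
import pyhmmer

with pyhmmer.easel.SequenceFile("query.fasta", digital=True) as query_file:
    query = query_file.read_block()[0]

with pyhmmer.easel.SequenceFile("targets.fasta", digital=True) as target_file:
    targets = target_file.read_block()

# With checkpoints=False (the default) only the last iteration is yielded.
result = next(pyhmmer.hmmer.jackhmmer(query, targets, max_iterations=3))
print(result.iterations, result.converged, len(result.hits))
```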
42,740 |
pyhmmer.hmmer
|
nhmmer
|
Search nucleotide sequences against a sequence database.
Arguments:
queries (iterable of `DigitalSequence`, `DigitalMSA`, `HMM`): The
query sequences or profiles to search for in the sequence
database. Passing a single object is supported.
sequences (iterable of `~pyhmmer.easel.DigitalSequence`): A
database of sequences to query. If you plan on using the
same sequences several times, consider storing them into
a `~pyhmmer.easel.DigitalSequenceBlock` directly. If a
`SequenceFile` is given, profiles will be loaded iteratively
from disk rather than prefetched.
cpus (`int`): The number of threads to run in parallel. Pass ``1`` to
run everything in the main thread, ``0`` to automatically
select a suitable number (using `psutil.cpu_count`), or any
positive number otherwise.
callback (callable): A callback that is called every time a query is
processed with two arguments: the query, and the total number
of queries. This can be used to display progress in UI.
builder (`~pyhmmer.plan7.Builder`, optional): A builder to configure
how the queries are converted to HMMs. Passing `None` will create
a default instance.
Yields:
`~pyhmmer.plan7.TopHits`: A *top hits* instance for each query,
in the same order the queries were passed in the input.
Note:
Any additional keyword arguments passed to the `nhmmer` function
will be passed to the `~pyhmmer.plan7.LongTargetsPipeline` created
in each worker thread. The ``strand`` argument can be used to
restrict the search to the direct or reverse strand.
Caution:
This function is not just `phmmer` for nucleotide sequences; it
actually uses a `~pyhmmer.plan7.LongTargetsPipeline` internally
instead of processing each target sequence in its entirety when
searching for hits. This avoids hitting the maximum target size
that can be used (100,000 residues), which may be a problem for
some larger genomes.
.. versionadded:: 0.3.0
.. versionchanged:: 0.4.9
Allow using `Profile` and `OptimizedProfile` queries.
.. versionchanged:: 0.7.0
Queries may now be an iterable of different types, or a single object.
|
def nhmmer(
queries: typing.Union[_NHMMERQueryType, typing.Iterable[_NHMMERQueryType]],
sequences: typing.Iterable[DigitalSequence],
*,
cpus: int = 0,
callback: typing.Optional[typing.Callable[[_NHMMERQueryType, int], None]] = None,
builder: typing.Optional[Builder] = None,
**options, # type: typing.Dict[str, object]
) -> typing.Iterator[TopHits]:
"""Search nucleotide sequences against a sequence database.
Arguments:
queries (iterable of `DigitalSequence`, `DigitalMSA`, `HMM`): The
query sequences or profiles to search for in the sequence
database. Passing a single object is supported.
sequences (iterable of `~pyhmmer.easel.DigitalSequence`): A
database of sequences to query. If you plan on using the
same sequences several times, consider storing them into
a `~pyhmmer.easel.DigitalSequenceBlock` directly. If a
`SequenceFile` is given, profiles will be loaded iteratively
from disk rather than prefetched.
cpus (`int`): The number of threads to run in parallel. Pass ``1`` to
run everything in the main thread, ``0`` to automatically
select a suitable number (using `psutil.cpu_count`), or any
positive number otherwise.
callback (callable): A callback that is called every time a query is
processed with two arguments: the query, and the total number
of queries. This can be used to display progress in UI.
builder (`~pyhmmer.plan7.Builder`, optional): A builder to configure
how the queries are converted to HMMs. Passing `None` will create
a default instance.
Yields:
`~pyhmmer.plan7.TopHits`: A *top hits* instance for each query,
in the same order the queries were passed in the input.
Note:
Any additional keyword arguments passed to the `nhmmer` function
will be passed to the `~pyhmmer.plan7.LongTargetsPipeline` created
in each worker thread. The ``strand`` argument can be used to
restrict the search to the direct or reverse strand.
Caution:
This function is not just `phmmer` for nucleotide sequences; it
actually uses a `~pyhmmer.plan7.LongTargetsPipeline` internally
instead of processing each target sequence in its entirety when
searching for hits. This avoids hitting the maximum target size
that can be used (100,000 residues), which may be a problem for
some larger genomes.
.. versionadded:: 0.3.0
.. versionchanged:: 0.4.9
Allow using `Profile` and `OptimizedProfile` queries.
.. versionchanged:: 0.7.0
Queries may now be an iterable of different types, or a single object.
"""
_alphabet = Alphabet.dna()
_cpus = cpus if cpus > 0 else psutil.cpu_count(logical=False) or os.cpu_count() or 1
if builder is None:
_builder = Builder(
_alphabet,
seed=options.get("seed", 42),
window_length=options.get("window_length"),
window_beta=options.get("window_beta"),
)
else:
_builder = builder
if not isinstance(queries, collections.abc.Iterable):
queries = (queries,)
if isinstance(sequences, SequenceFile):
sequence_file: SequenceFile = sequences
if sequence_file.name is None:
raise ValueError("expected named `SequenceFile` for targets")
if not sequence_file.digital:
raise ValueError("expected digital mode `SequenceFile` for targets")
assert sequence_file.alphabet is not None
alphabet = sequence_file.alphabet
targets: typing.Union[SequenceFile, DigitalSequenceBlock] = sequence_file
elif isinstance(sequences, DigitalSequenceBlock):
alphabet = sequences.alphabet
targets = sequences
else:
alphabet = _alphabet
targets = DigitalSequenceBlock(_alphabet, sequences)
dispatcher = _NHMMERDispatcher(
queries=queries,
targets=targets,
cpus=_cpus,
callback=callback,
pipeline_class=LongTargetsPipeline,
alphabet=alphabet,
builder=_builder,
**options,
)
return dispatcher.run()
|
(queries: Union[pyhmmer.easel.DigitalSequence, pyhmmer.easel.DigitalMSA, pyhmmer.plan7.HMM, pyhmmer.plan7.Profile, pyhmmer.plan7.OptimizedProfile, Iterable[Union[pyhmmer.easel.DigitalSequence, pyhmmer.easel.DigitalMSA, pyhmmer.plan7.HMM, pyhmmer.plan7.Profile, pyhmmer.plan7.OptimizedProfile]]], sequences: Iterable[pyhmmer.easel.DigitalSequence], *, cpus: int = 0, callback: Optional[Callable[[Union[pyhmmer.easel.DigitalSequence, pyhmmer.easel.DigitalMSA, pyhmmer.plan7.HMM, pyhmmer.plan7.Profile, pyhmmer.plan7.OptimizedProfile], int], NoneType]] = None, builder: Optional[pyhmmer.plan7.Builder] = None, **options) -> Iterator[pyhmmer.plan7.TopHits]
|
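A usage sketch with placeholder paths; the ``strand`` keyword is forwarded to the `LongTargetsPipeline` and is assumed here to accept ``"watson"`` for the direct strand.

```python
import pyhmmer

with pyhmmer.easel.SequenceFile("query.fasta", digital=True) as query_file:
    queries = query_file.read_block()

with pyhmmer.easel.SequenceFile("genome.fasta", digital=True) as target_file:
    genome = target_file.read_block()

# strand="watson" restricts the search to the direct strand (assumption noted above).
for hits in pyhmmer.hmmer.nhmmer(queries, genome, cpus=4, strand="watson"):
    print(len(hits))
```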
42,741 |
pyhmmer.hmmer
|
phmmer
|
Search protein sequences against a sequence database.
Arguments:
queries (iterable of `DigitalSequence` or `DigitalMSA`): The query
sequences to search for in the sequence database. Passing a
single object is supported.
sequences (iterable of `~pyhmmer.easel.DigitalSequence`): A database
of sequences to query. If you plan on using the same sequences
several times, consider storing them into a
`~pyhmmer.easel.DigitalSequenceBlock` directly. If a
`SequenceFile` is given, profiles will be loaded iteratively
from disk rather than prefetched.
cpus (`int`): The number of threads to run in parallel. Pass ``1`` to
run everything in the main thread, ``0`` to automatically
select a suitable number (using `psutil.cpu_count`), or any
positive number otherwise.
callback (callable): A callback that is called every time a query is
processed with two arguments: the query, and the total number
of queries. This can be used to display progress in UI.
builder (`~pyhmmer.plan7.Builder`, optional): A builder to configure
how the queries are converted to HMMs. Passing `None` will create
a default instance.
Yields:
`~pyhmmer.plan7.TopHits`: A *top hits* instance for each query,
in the same order the queries were passed in the input.
Raises:
`~pyhmmer.errors.AlphabetMismatch`: When any of the query sequences,
the profile, or the optional builder do not share the same
alphabet.
Note:
Any additional keyword arguments passed to the `phmmer` function
will be passed transparently to the `~pyhmmer.plan7.Pipeline` to
be created in each worker thread.
.. versionadded:: 0.2.0
.. versionchanged:: 0.3.0
Allow using `DigitalMSA` queries.
.. versionchanged:: 0.7.0
Queries may now be an iterable of different types, or a single object.
|
def phmmer(
queries: typing.Union[_PHMMERQueryType, typing.Iterable[_PHMMERQueryType]],
sequences: typing.Iterable[DigitalSequence],
*,
cpus: int = 0,
callback: typing.Optional[typing.Callable[[_PHMMERQueryType, int], None]] = None,
builder: typing.Optional[Builder] = None,
**options, # type: typing.Dict[str, object]
) -> typing.Iterator[TopHits]:
"""Search protein sequences against a sequence database.
Arguments:
queries (iterable of `DigitalSequence` or `DigitalMSA`): The query
sequences to search for in the sequence database. Passing a
single object is supported.
sequences (iterable of `~pyhmmer.easel.DigitalSequence`): A database
of sequences to query. If you plan on using the same sequences
several times, consider storing them into a
`~pyhmmer.easel.DigitalSequenceBlock` directly. If a
`SequenceFile` is given, profiles will be loaded iteratively
from disk rather than prefetched.
cpus (`int`): The number of threads to run in parallel. Pass ``1`` to
run everything in the main thread, ``0`` to automatically
select a suitable number (using `psutil.cpu_count`), or any
positive number otherwise.
callback (callable): A callback that is called every time a query is
processed with two arguments: the query, and the total number
of queries. This can be used to display progress in UI.
builder (`~pyhmmer.plan7.Builder`, optional): A builder to configure
how the queries are converted to HMMs. Passing `None` will create
a default instance.
Yields:
`~pyhmmer.plan7.TopHits`: A *top hits* instance for each query,
in the same order the queries were passed in the input.
Raises:
`~pyhmmer.errors.AlphabetMismatch`: When any of the query sequences,
the profile, or the optional builder do not share the same
alphabet.
Note:
Any additional keyword arguments passed to the `phmmer` function
will be passed transparently to the `~pyhmmer.plan7.Pipeline` to
be created in each worker thread.
.. versionadded:: 0.2.0
.. versionchanged:: 0.3.0
Allow using `DigitalMSA` queries.
.. versionchanged:: 0.7.0
Queries may now be an iterable of different types, or a single object.
"""
_alphabet = Alphabet.amino()
_cpus = cpus if cpus > 0 else psutil.cpu_count(logical=False) or os.cpu_count() or 1
_builder = Builder(_alphabet) if builder is None else builder
if not isinstance(queries, collections.abc.Iterable):
queries = (queries,)
if isinstance(sequences, SequenceFile):
sequence_file: SequenceFile = sequences
if sequence_file.name is None:
raise ValueError("expected named `SequenceFile` for targets")
if not sequence_file.digital:
raise ValueError("expected digital mode `SequenceFile` for targets")
assert sequence_file.alphabet is not None
alphabet = sequence_file.alphabet
targets: typing.Union[SequenceFile, DigitalSequenceBlock] = sequence_file
elif isinstance(sequences, DigitalSequenceBlock):
alphabet = sequences.alphabet
targets = sequences
else:
alphabet = _alphabet
targets = DigitalSequenceBlock(_alphabet, sequences)
dispatcher = _PHMMERDispatcher(
queries=queries,
targets=targets,
cpus=_cpus,
callback=callback,
pipeline_class=Pipeline,
alphabet=alphabet,
builder=_builder,
**options,
)
return dispatcher.run()
|
(queries: Union[pyhmmer.easel.DigitalSequence, pyhmmer.easel.DigitalMSA, Iterable[Union[pyhmmer.easel.DigitalSequence, pyhmmer.easel.DigitalMSA]]], sequences: Iterable[pyhmmer.easel.DigitalSequence], *, cpus: int = 0, callback: Optional[Callable[[Union[pyhmmer.easel.DigitalSequence, pyhmmer.easel.DigitalMSA], int], NoneType]] = None, builder: Optional[pyhmmer.plan7.Builder] = None, **options) -> Iterator[pyhmmer.plan7.TopHits]
|
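A usage sketch with placeholder paths, showing the ``callback`` hook used to report progress.

```python
import pyhmmer

def progress(query, total):
    # query is the DigitalSequence just processed, total the number of queries.
    print(f"done {query.name.decode()} / {total} queries")

with pyhmmer.easel.SequenceFile("queries.fasta", digital=True) as query_file:
    queries = query_file.read_block()

with pyhmmer.easel.SequenceFile("targets.fasta", digital=True) as target_file:
    targets = target_file.read_block()

all_hits = list(pyhmmer.hmmer.phmmer(queries, targets, cpus=2, callback=progress))
```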
42,744 |
preprocess_cancellation
|
HullTracker
| null |
class HullTracker:
def add_point(self, point: Point):
...
def center(self) -> Point:
...
def exterior(self) -> list[Point]:
...
@classmethod
def create(cls):
if shapely:
return ShapelyHullTracker()
return SimpleHullTracker()
|
()
|
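A minimal usage sketch, assuming `HullTracker` and `Point` are importable from the `preprocess_cancellation` module: `create()` picks the shapely-backed tracker when shapely is importable and falls back to the bounding-box tracker otherwise.

```python
from preprocess_cancellation import HullTracker, Point

tracker = HullTracker.create()
for x, y in [(10.0, 10.0), (10.0, 20.0), (20.0, 20.0), (20.0, 10.0)]:
    tracker.add_point(Point(x, y))

print(tracker.center())    # Point(x=15.0, y=15.0)
print(tracker.exterior())  # convex hull (shapely) or bounding box (fallback)
```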
42,745 |
preprocess_cancellation
|
add_point
| null |
def add_point(self, point: Point):
...
|
(self, point: preprocess_cancellation.Point)
|
42,746 |
preprocess_cancellation
|
center
| null |
def center(self) -> Point:
...
|
(self) -> preprocess_cancellation.Point
|
42,747 |
preprocess_cancellation
|
exterior
| null |
def exterior(self) -> list[Point]:
...
|
(self) -> list[preprocess_cancellation.Point]
|
42,748 |
preprocess_cancellation
|
KnownObject
|
KnownObject(name, hull)
|
class KnownObject(NamedTuple):
name: str
hull: HullTracker
|
(name: str, hull: preprocess_cancellation.HullTracker)
|
42,750 |
namedtuple_KnownObject
|
__new__
|
Create new instance of KnownObject(name, hull)
|
from builtins import function
|
(_cls, name: ForwardRef('str'), hull: ForwardRef('HullTracker'))
|
42,753 |
collections
|
_replace
|
Return a new KnownObject object replacing specified fields with new values
|
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = _sys.intern(str(typename))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f'_{index}'
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
f'identifiers: {name!r}')
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
f'keyword: {name!r}')
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
f'{name!r}')
if name in seen:
raise ValueError(f'Encountered duplicate field name: {name!r}')
seen.add(name)
field_defaults = {}
if defaults is not None:
defaults = tuple(defaults)
if len(defaults) > len(field_names):
raise TypeError('Got more default values than field names')
field_defaults = dict(reversed(list(zip(reversed(field_names),
reversed(defaults)))))
# Variables used in the methods and docstrings
field_names = tuple(map(_sys.intern, field_names))
num_fields = len(field_names)
arg_list = ', '.join(field_names)
if num_fields == 1:
arg_list += ','
repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
tuple_new = tuple.__new__
_dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip
# Create all the named tuple methods to be added to the class namespace
namespace = {
'_tuple_new': tuple_new,
'__builtins__': {},
'__name__': f'namedtuple_{typename}',
}
code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
__new__ = eval(code, namespace)
__new__.__name__ = '__new__'
__new__.__doc__ = f'Create new instance of {typename}({arg_list})'
if defaults is not None:
__new__.__defaults__ = defaults
@classmethod
def _make(cls, iterable):
result = tuple_new(cls, iterable)
if _len(result) != num_fields:
raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
return result
_make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
'or iterable')
def _replace(self, /, **kwds):
result = self._make(_map(kwds.pop, field_names, self))
if kwds:
raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
return result
_replace.__doc__ = (f'Return a new {typename} object replacing specified '
'fields with new values')
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + repr_fmt % self
def _asdict(self):
'Return a new dict which maps field names to their values.'
return _dict(_zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return _tuple(self)
# Modify function metadata to help with introspection and debugging
for method in (
__new__,
_make.__func__,
_replace,
__repr__,
_asdict,
__getnewargs__,
):
method.__qualname__ = f'{typename}.{method.__name__}'
# Build-up the class namespace dictionary
# and use type() to build the result class
class_namespace = {
'__doc__': f'{typename}({arg_list})',
'__slots__': (),
'_fields': field_names,
'_field_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
'_replace': _replace,
'__repr__': __repr__,
'_asdict': _asdict,
'__getnewargs__': __getnewargs__,
'__match_args__': field_names,
}
for index, name in enumerate(field_names):
doc = _sys.intern(f'Alias for field number {index}')
class_namespace[name] = _tuplegetter(index, doc)
result = type(typename, (tuple,), class_namespace)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
|
(self, /, **kwds)
|
42,755 |
preprocess_cancellation
|
Point
|
Point(x, y)
|
class Point(NamedTuple):
x: float
y: float
|
(x: float, y: float)
|
42,757 |
namedtuple_Point
|
__new__
|
Create new instance of Point(x, y)
|
from builtins import function
|
(_cls, x: ForwardRef('float'), y: ForwardRef('float'))
|
42,760 |
collections
|
_replace
|
Return a new Point object replacing specified fields with new values
|
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = _sys.intern(str(typename))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f'_{index}'
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
f'identifiers: {name!r}')
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
f'keyword: {name!r}')
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
f'{name!r}')
if name in seen:
raise ValueError(f'Encountered duplicate field name: {name!r}')
seen.add(name)
field_defaults = {}
if defaults is not None:
defaults = tuple(defaults)
if len(defaults) > len(field_names):
raise TypeError('Got more default values than field names')
field_defaults = dict(reversed(list(zip(reversed(field_names),
reversed(defaults)))))
# Variables used in the methods and docstrings
field_names = tuple(map(_sys.intern, field_names))
num_fields = len(field_names)
arg_list = ', '.join(field_names)
if num_fields == 1:
arg_list += ','
repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
tuple_new = tuple.__new__
_dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip
# Create all the named tuple methods to be added to the class namespace
namespace = {
'_tuple_new': tuple_new,
'__builtins__': {},
'__name__': f'namedtuple_{typename}',
}
code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
__new__ = eval(code, namespace)
__new__.__name__ = '__new__'
__new__.__doc__ = f'Create new instance of {typename}({arg_list})'
if defaults is not None:
__new__.__defaults__ = defaults
@classmethod
def _make(cls, iterable):
result = tuple_new(cls, iterable)
if _len(result) != num_fields:
raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
return result
_make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
'or iterable')
def _replace(self, /, **kwds):
result = self._make(_map(kwds.pop, field_names, self))
if kwds:
raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
return result
_replace.__doc__ = (f'Return a new {typename} object replacing specified '
'fields with new values')
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + repr_fmt % self
def _asdict(self):
'Return a new dict which maps field names to their values.'
return _dict(_zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return _tuple(self)
# Modify function metadata to help with introspection and debugging
for method in (
__new__,
_make.__func__,
_replace,
__repr__,
_asdict,
__getnewargs__,
):
method.__qualname__ = f'{typename}.{method.__name__}'
# Build-up the class namespace dictionary
# and use type() to build the result class
class_namespace = {
'__doc__': f'{typename}({arg_list})',
'__slots__': (),
'_fields': field_names,
'_field_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
'_replace': _replace,
'__repr__': __repr__,
'_asdict': _asdict,
'__getnewargs__': __getnewargs__,
'__match_args__': field_names,
}
for index, name in enumerate(field_names):
doc = _sys.intern(f'Alias for field number {index}')
class_namespace[name] = _tuplegetter(index, doc)
result = type(typename, (tuple,), class_namespace)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
|
(self, /, **kwds)
|
42,761 |
preprocess_cancellation
|
ShapelyHullTracker
| null |
class ShapelyHullTracker(HullTracker):
def __init__(self):
self.pos = None
self.points: Set[Point] = set()
def add_point(self, point: Point):
self.points.add(point)
def center(self):
if not self.points:
return
return Point(
statistics.mean(p[0] for p in self.points),
statistics.mean(p[1] for p in self.points),
)
def exterior(self):
if not self.points:
return
return list(
shapely.geometry.MultiPoint(list(self.points))
.convex_hull.simplify(0.02, preserve_topology=False)
.exterior.coords
)
|
()
|
42,762 |
preprocess_cancellation
|
__init__
| null |
def __init__(self):
self.pos = None
self.points: Set[Point] = set()
|
(self)
|
42,763 |
preprocess_cancellation
|
add_point
| null |
def add_point(self, point: Point):
self.points.add(point)
|
(self, point: preprocess_cancellation.Point)
|
42,764 |
preprocess_cancellation
|
center
| null |
def center(self):
if not self.points:
return
return Point(
statistics.mean(p[0] for p in self.points),
statistics.mean(p[1] for p in self.points),
)
|
(self)
|
42,765 |
preprocess_cancellation
|
exterior
| null |
def exterior(self):
if not self.points:
return
return list(
shapely.geometry.MultiPoint(list(self.points))
.convex_hull.simplify(0.02, preserve_topology=False)
.exterior.coords
)
|
(self)
|
42,766 |
preprocess_cancellation
|
SimpleHullTracker
| null |
class SimpleHullTracker(HullTracker):
def __init__(self) -> None:
self.xmin = 999999
self.ymin = 999999
self.xmax = -999999
self.ymax = -999999
self.count_points = 0
self.xsum = 0
self.ysum = 0
def add_point(self, point: Point):
self.xsum += point.x
self.ysum += point.y
self.count_points += 1
if point.x < self.xmin:
self.xmin = point.x
if point.y < self.ymin:
self.ymin = point.y
if point.x > self.xmax:
self.xmax = point.x
if point.y > self.ymax:
self.ymax = point.y
def center(self):
if not self.count_points:
return
return Point(
self.xsum / self.count_points,
self.ysum / self.count_points,
)
def exterior(self):
if not self.count_points:
return
return boundingbox((self.xmin, self.ymin), (self.xmax, self.ymax))
|
() -> 'None'
|
42,767 |
preprocess_cancellation
|
__init__
| null |
def __init__(self) -> None:
self.xmin = 999999
self.ymin = 999999
self.xmax = -999999
self.ymax = -999999
self.count_points = 0
self.xsum = 0
self.ysum = 0
|
(self) -> NoneType
|
42,768 |
preprocess_cancellation
|
add_point
| null |
def add_point(self, point: Point):
self.xsum += point.x
self.ysum += point.y
self.count_points += 1
if point.x < self.xmin:
self.xmin = point.x
if point.y < self.ymin:
self.ymin = point.y
if point.x > self.xmax:
self.xmax = point.x
if point.y > self.ymax:
self.ymax = point.y
|
(self, point: preprocess_cancellation.Point)
|
42,769 |
preprocess_cancellation
|
center
| null |
def center(self):
if not self.count_points:
return
return Point(
self.xsum / self.count_points,
self.ysum / self.count_points,
)
|
(self)
|
42,770 |
preprocess_cancellation
|
exterior
| null |
def exterior(self):
if not self.count_points:
return
return boundingbox((self.xmin, self.ymin), (self.xmax, self.ymax))
|
(self)
|
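Both trackers expose the same small interface (add_point, center, exterior), so callers can swap them freely. A minimal usage sketch, assuming preprocess_cancellation is importable and Point is the module's (x, y) named tuple used above:

    from preprocess_cancellation import Point, SimpleHullTracker

    tracker = SimpleHullTracker()
    for x, y in [(10.0, 10.0), (10.0, 20.0), (30.0, 20.0), (30.0, 10.0)]:
        tracker.add_point(Point(x, y))

    print(tracker.center())    # mean of the tracked points: (20.0, 15.0)
    print(tracker.exterior())  # bounding-box corners: [(10.0, 10.0), (10.0, 20.0), (30.0, 20.0), (30.0, 10.0)]

ShapelyHullTracker returns a simplified convex hull instead of the axis-aligned box, which is presumably why it is preferred when shapely is available.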
42,779 |
preprocess_cancellation
|
_clean_id
| null |
def _clean_id(id):
return re.sub(r"\W+", "_", id).strip("_")
|
(id)
|
42,780 |
preprocess_cancellation
|
_dump_coords
| null |
def _dump_coords(coords: List[float]) -> str:
return ",".join(map("{:0.3f}".format, coords))
|
(coords: List[float]) -> str
|
42,781 |
preprocess_cancellation
|
_main
| null |
def _main():
argparser = argparse.ArgumentParser()
argparser.add_argument(
"--output-suffix",
"-o",
help="Add a suffix to gcoode output. Without this, gcode will be rewritten in place",
)
argparser.add_argument(
"--disable-shapely", help="Disable using shapely to generate a hull polygon for objects", action="store_true"
)
argparser.add_argument("gcode", nargs="*")
exitcode = 0
args = argparser.parse_args()
if args.disable_shapely:
global shapely
shapely = None
for filename in args.gcode:
if not process_file_for_cancellation(filename, args.output_suffix):
exitcode = 1
sys.exit(exitcode)
|
()
|
42,783 |
preprocess_cancellation
|
boundingbox
| null |
def boundingbox(pmin: Point, pmax: Point):
return [
(pmin[0], pmin[1]),
(pmin[0], pmax[1]),
(pmax[0], pmax[1]),
(pmax[0], pmin[1]),
]
|
(pmin: preprocess_cancellation.Point, pmax: preprocess_cancellation.Point)
|
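boundingbox expands two opposite corners into the four corners of an axis-aligned rectangle, in min-x/min-y, min-x/max-y, max-x/max-y, max-x/min-y order:

    >>> boundingbox((0, 0), (5, 10))
    [(0, 0), (0, 10), (5, 10), (5, 0)]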
42,784 |
preprocess_cancellation
|
define_object
| null |
def define_object(
name,
center: Optional[Point] = None,
polygon: Optional[Point] = None,
region: Optional[List[Point]] = None,
):
yield f"EXCLUDE_OBJECT_DEFINE NAME={name}"
if center:
yield f" CENTER={_dump_coords(center)}"
if polygon:
yield f" POLYGON={json.dumps(polygon, separators=(',', ':'))}"
if region:
yield f" REGION={_dump_coords(region, separators=(',', ':'))}"
yield "\n"
|
(name, center: Optional[preprocess_cancellation.Point] = None, polygon: Optional[preprocess_cancellation.Point] = None, region: Optional[List[preprocess_cancellation.Point]] = None)
|
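define_object is a generator yielding fragments of a single EXCLUDE_OBJECT_DEFINE line that callers join together. A sketch of its output with an invented object name, assuming Point is the module's (x, y) named tuple:

    >>> "".join(define_object("part_0", center=Point(10, 20), polygon=[[0, 0], [0, 40], [40, 40], [40, 0]]))
    'EXCLUDE_OBJECT_DEFINE NAME=part_0 CENTER=10.000,20.000 POLYGON=[[0,0],[0,40],[40,40],[40,0]]\n'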
42,785 |
preprocess_cancellation
|
header
| null |
def header(object_count):
yield "\n\n"
yield HEADER_MARKER
yield f"; {object_count} known objects\n"
|
(object_count)
|
42,786 |
preprocess_cancellation
|
identify_slicer_marker
| null |
def identify_slicer_marker(line):
for name, (marker, processor) in SLICERS.items():
if line.strip().startswith(marker):
logger.debug("Identified slicer %s", name)
return processor
|
(line)
|
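identify_slicer_marker relies on a module-level SLICERS mapping that is not shown in this excerpt; from the way it is consumed, each entry maps a slicer name to a (comment-marker, preprocessor) pair. The sketch below only illustrates that expected shape; the marker strings are hypothetical and the real values live elsewhere in the module:

    # Hypothetical shape of SLICERS; the marker strings are made up for illustration.
    SLICERS = {
        "slicer": ("; generated by a Slic3r-family slicer", preprocess_slicer),
        "cura": ("; Cura comment marker here", preprocess_cura),
        "ideamaker": ("; ideaMaker comment marker here", preprocess_ideamaker),
    }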
42,789 |
preprocess_cancellation
|
object_end_marker
| null |
def object_end_marker(object_name):
yield f"EXCLUDE_OBJECT_END NAME={object_name}\n"
|
(object_name)
|
42,790 |
preprocess_cancellation
|
object_start_marker
| null |
def object_start_marker(object_name):
yield f"EXCLUDE_OBJECT_START NAME={object_name}\n"
|
(object_name)
|
42,791 |
preprocess_cancellation
|
parse_gcode
| null |
def parse_gcode(line):
# drop comments
line = line.split(";", maxsplit=1)[0]
command, *params = line.strip().split()
parsed = {}
for param in params:
if "=" in param:
parsed.update(dict(zip(param.split("=", maxsplit=1))))
else:
parsed.update({param[0].upper(): param[1:]})
return command, parsed
|
(line)
|
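parse_gcode splits a stripped, comment-free line into the command and a parameter dict; bare parameters such as X10.5 are keyed by their upper-cased letter with the remainder kept as a string:

    >>> parse_gcode("G1 X10.5 Y20 E0.8 ; infill")
    ('G1', {'X': '10.5', 'Y': '20', 'E': '0.8'})
    >>> parse_gcode("M486 S3")
    ('M486', {'S': '3'})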
42,793 |
preprocess_cancellation
|
preprocess_cura
| null |
def preprocess_cura(infile):
known_objects: Dict[str, KnownObject] = {}
current_hull: Optional[HullTracker] = None
    last_time_elapsed: Optional[str] = None
# iterate the file twice, to be able to inject the header markers
for line in infile:
if line.startswith(";MESH:"):
object_name = line.split(":", maxsplit=1)[1].strip()
if object_name == "NONMESH":
continue
if object_name not in known_objects:
known_objects[object_name] = KnownObject(_clean_id(object_name), HullTracker.create())
current_hull = known_objects[object_name].hull
if current_hull and line.strip().lower().startswith("g"):
_, params = parse_gcode(line)
if float(params.get("E", -1)) > 0 and "X" in params and "Y" in params:
x = float(params["X"])
y = float(params["Y"])
current_hull.add_point(Point(x, y))
if line.startswith(";TIME_ELAPSED:"):
last_time_elapsed = line
infile.seek(0)
for line in infile:
if line.strip() and not line.startswith(";"):
# Inject custom marker
yield from header(len(known_objects))
for mesh_id, hull in known_objects.values():
yield from define_object(
mesh_id,
center=hull.center(),
polygon=hull.exterior(),
)
yield line
break
yield line
current_object = None
for line in infile:
yield line
if line.startswith(";MESH:"):
if current_object:
yield from object_end_marker(current_object)
current_object = None
mesh = line.split(":", maxsplit=1)[1].strip()
if mesh == "NONMESH":
continue
current_object, _ = known_objects[mesh]
yield from object_start_marker(current_object)
if line == last_time_elapsed and current_object:
yield from object_end_marker(current_object)
current_object = None
if current_object:
yield from object_end_marker(current_object)
|
(infile)
|
42,794 |
preprocess_cancellation
|
preprocess_ideamaker
| null |
def preprocess_ideamaker(infile):
    # ideaMaker is a bit different: the file contains blocks like the following,
    # which we can scan to collect the object names and ideaMaker's IDs for them.
# ;PRINTING: test_bed_part0.3mf
# ;PRINTING_ID: 0
known_objects: Dict[str, KnownObject] = {}
    current_hull: Optional[HullTracker] = None
for line in infile:
if line.startswith(";TOTAL_NUM:"):
total_num = int(line.split(":")[1].strip())
if line.startswith(";PRINTING:"):
name = line.split(":")[1].strip()
id_line = next(infile)
assert id_line.startswith(";PRINTING_ID:")
id = id_line.split(":")[1].strip()
# Ignore the internal non-object meshes
if id == "-1":
continue
if id not in known_objects:
known_objects[id] = KnownObject(_clean_id(name), HullTracker.create())
current_hull = known_objects[id].hull
if current_hull and line.strip().lower().startswith("g"):
command, params = parse_gcode(line)
if float(params.get("E", -1)) > 0 and "X" in params and "Y" in params:
x = float(params["X"])
y = float(params["Y"])
current_hull.add_point(Point(x, y))
infile.seek(0)
current_object: Optional[KnownObject] = None
for line in infile:
yield line
if line.strip() and not line.startswith(";"):
break
assert total_num == len(known_objects)
yield from header(total_num)
for id, (name, hull) in known_objects.items():
yield from define_object(
name,
center=hull.center(),
polygon=hull.exterior(),
)
yield "\n\n"
for line in infile:
yield line
if line.startswith(";PRINTING_ID:"):
printing_id = line.split(":")[1].strip()
if current_object:
yield from object_end_marker(current_object.name)
current_object = None
if printing_id == "-1":
continue
current_object = known_objects[printing_id]
yield from object_start_marker(current_object.name)
if line == ";REMAINING_TIME: 0\n" and current_object:
yield from object_end_marker(current_object.name)
current_object = None
if current_object:
yield from object_end_marker(current_object.name)
|
(infile)
|
42,795 |
preprocess_cancellation
|
preprocess_m486
| null |
def preprocess_m486(infile):
known_objects: Dict[str, KnownObject] = {}
current_hull: Optional[HullTracker] = None
for line in infile:
if line.startswith("M486"):
_, params = parse_gcode(line)
if "T" in params:
for i in range(-1, int(params["T"])):
known_objects[f"{i}"] = KnownObject(f"{i}", HullTracker.create())
elif "S" in params:
current_hull = known_objects[params["S"]].hull
if current_hull and line.strip().lower().startswith("g"):
_, params = parse_gcode(line)
if float(params.get("E", -1)) > 0 and "X" in params and "Y" in params:
x = float(params["X"])
y = float(params["Y"])
current_hull.add_point(Point(x, y))
infile.seek(0)
current_object = None
for line in infile:
if line.upper().startswith("M486"):
_, params = parse_gcode(line)
if "T" in params:
# Inject custom marker
yield from header(len(known_objects))
for mesh_id, hull in known_objects.values():
if mesh_id == "-1":
continue
yield from define_object(
mesh_id,
center=hull.center(),
polygon=hull.exterior(),
)
if "S" in params:
if current_object:
yield from object_end_marker(current_object.name)
current_object = None
if params["S"] != "-1":
current_object = known_objects[params["S"]]
yield from object_start_marker(current_object.name)
yield "; " # Comment out the original M486 lines
yield line
|
(infile)
|
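A minimal sketch of running the M486 converter on an in-memory file; the G-code is invented for illustration. The first pass over the file collects hulls per numbered object, the second pass injects EXCLUDE_OBJECT_* markers and comments out the original M486 lines:

    import io
    from preprocess_cancellation import preprocess_m486

    gcode = io.StringIO(
        "M486 T2\n"
        "M486 S0\n"
        "G1 X10 Y10 E1\n"
        "M486 S-1\n"
    )
    converted = "".join(preprocess_m486(gcode))
    # converted contains EXCLUDE_OBJECT_DEFINE lines for objects 0 and 1,
    # EXCLUDE_OBJECT_START NAME=0 / EXCLUDE_OBJECT_END NAME=0 around the extrusion move,
    # and every original M486 line prefixed with "; ".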
42,796 |
preprocess_cancellation
|
preprocess_pipe
| null |
def preprocess_pipe(infile):
yield from infile
|
(infile)
|
42,797 |
preprocess_cancellation
|
preprocess_slicer
| null |
def preprocess_slicer(infile):
known_objects: Dict[str, KnownObject] = {}
current_hull: Optional[HullTracker] = None
for line in infile:
if line.startswith("; printing object "):
object_id = line.split("printing object")[1].strip()
if object_id not in known_objects:
known_objects[object_id] = KnownObject(_clean_id(object_id), HullTracker.create())
current_hull = known_objects[object_id].hull
if line.startswith("; stop printing object "):
current_hull = None
if current_hull and line.strip().lower().startswith("g"):
command, params = parse_gcode(line)
if float(params.get("E", -1)) > 0 and "X" in params and "Y" in params:
x = float(params["X"])
y = float(params["Y"])
current_hull.add_point(Point(x, y))
infile.seek(0)
for line in infile:
if line.strip() and not line.startswith(";"):
# Inject custom marker
yield from header(len(known_objects))
for object_id, hull in known_objects.values():
yield from define_object(
object_id,
center=hull.center(),
polygon=hull.exterior(),
)
yield line
break
yield line
for line in infile:
yield line
if line.startswith("; printing object "):
yield from object_start_marker(known_objects[line.split("printing object")[1].strip()].name)
if line.startswith("; stop printing object "):
yield from object_end_marker(known_objects[line.split("printing object")[1].strip()].name)
|
(infile)
|
42,798 |
preprocess_cancellation
|
preprocessor
| null |
def preprocessor(infile, outfile):
logger.debug("Identifying slicer")
found_m486 = False
processor = None
for line in infile:
if line.startswith("EXCLUDE_OBJECT_DEFINE") or line.startswith("DEFINE_OBJECT"):
logger.info("GCode already supports cancellation")
infile.seek(0)
outfile.write(infile.read())
return True
if line.startswith("M486"):
if not found_m486:
logger.info("File has existing M486 markers, converting")
found_m486 = True
processor = preprocess_m486
if not processor:
processor = identify_slicer_marker(line)
infile.seek(0)
for line in processor(infile):
outfile.write(line)
return True
|
(infile, outfile)
|
42,799 |
preprocess_cancellation
|
process_file_for_cancellation
| null |
def process_file_for_cancellation(filename: PathLike, output_suffix=None) -> int:
filepath = pathlib.Path(filename)
outfilepath = filepath
if output_suffix:
outfilepath = outfilepath.with_name(outfilepath.stem + output_suffix + outfilepath.suffix)
tempfilepath = pathlib.Path(tempfile.mktemp())
with filepath.open("r") as fin:
with tempfilepath.open("w") as fout:
res = preprocessor(fin, fout)
if res:
if outfilepath.exists():
outfilepath.unlink()
shutil.move(tempfilepath, outfilepath)
else:
tempfilepath.unlink()
return res
|
(filename: ~PathLike, output_suffix=None) -> int
|
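Tying the pieces together: the CLI entry point calls process_file_for_cancellation for each input file, which runs preprocessor to pick a slicer handler and writes the result through a temporary file. A hedged sketch of library use (the file name is made up):

    from preprocess_cancellation import process_file_for_cancellation

    ok = process_file_for_cancellation("my_print.gcode", output_suffix="_excl")
    # When a known slicer is detected this writes my_print_excl.gcode next to the input;
    # without output_suffix the input file is rewritten in place.

On the command line this corresponds roughly to running the script with `-o _excl my_print.gcode`; the exact invocation depends on how the script is installed.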
42,805 |
linearmodels.iv.absorbing
|
AbsorbingLS
|
Linear regression with high-dimensional effects
Parameters
----------
dependent : array_like
Endogenous variables (nobs by 1)
exog : array_like
Exogenous regressors (nobs by nexog)
absorb: {DataFrame, Interaction}
The effects or continuous variables to absorb. When using a DataFrame,
effects must be categorical variables. Other variable types are treated
as continuous variables that should be absorbed. When using an
Interaction, variables in the `cat` argument are treated as effects
and variables in the `cont` argument are treated as continuous.
interactions : {DataFrame, Interaction, list[DataFrame, Interaction]}
Interactions containing both categorical and continuous variables. Each
interaction is constructed using the Cartesian product of the categorical
    variables to produce the dummies, which are then separately interacted with
each continuous variable.
weights : array_like
Observation weights used in estimation
drop_absorbed : bool
Flag indicating whether to drop absorbed variables
Notes
-----
Capable of estimating models with millions of effects.
Estimates models of the form
.. math::
y_i = x_i \beta + z_i \gamma + \epsilon_i
where :math:`\beta` are parameters of interest and :math:`\gamma`
are not. z may be high-dimensional, although it must have fewer
variables than the number of observations in y.
The syntax simplifies specifying high-dimensional z when z consists
of categorical (factor) variables, also known as effects, or when
z contains interactions between continuous variables and categorical
variables, also known as fixed slopes.
The high-dimensional effects are fit using LSMR which avoids inverting
or even constructing the inner product of the regressors. This is
combined with Frisch-Waugh-Lovell to orthogonalize x and y from z.
z can contain factors that are perfectly linearly dependent. LSMR
estimates a particular restricted set of parameters that captures the
effect of non-redundant components in z.
See also
--------
Interaction
linearmodels.iv.model.IVLIML
linearmodels.iv.model.IV2SLS
scipy.sparse.linalg.lsmr
Examples
--------
Estimate a model by absorbing 2 categoricals and 2 continuous variables
>>> import numpy as np
>>> import pandas as pd
>>> from linearmodels.iv import AbsorbingLS, Interaction
>>> dep = np.random.standard_normal((20000,1))
>>> exog = np.random.standard_normal((20000,2))
>>> cats = pd.DataFrame({i: pd.Categorical(np.random.randint(1000, size=20000))
... for i in range(2)})
>>> cont = pd.DataFrame({i+2: np.random.standard_normal(20000) for i in range(2)})
>>> absorb = pd.concat([cats, cont], axis=1)
>>> mod = AbsorbingLS(dep, exog, absorb=absorb)
>>> res = mod.fit()
Add interactions between the Cartesian product of the categorical and
each continuous variable
>>> iaction = Interaction(cat=cats, cont=cont)
>>> absorb = Interaction(cat=cats) # Other encoding of categoricals
>>> mod = AbsorbingLS(dep, exog, absorb=absorb, interactions=iaction)
|
class AbsorbingLS:
r"""
Linear regression with high-dimensional effects
Parameters
----------
dependent : array_like
Endogenous variables (nobs by 1)
exog : array_like
Exogenous regressors (nobs by nexog)
absorb: {DataFrame, Interaction}
The effects or continuous variables to absorb. When using a DataFrame,
effects must be categorical variables. Other variable types are treated
as continuous variables that should be absorbed. When using an
Interaction, variables in the `cat` argument are treated as effects
and variables in the `cont` argument are treated as continuous.
interactions : {DataFrame, Interaction, list[DataFrame, Interaction]}
Interactions containing both categorical and continuous variables. Each
interaction is constructed using the Cartesian product of the categorical
        variables to produce the dummies, which are then separately interacted with
each continuous variable.
weights : array_like
Observation weights used in estimation
drop_absorbed : bool
Flag indicating whether to drop absorbed variables
Notes
-----
Capable of estimating models with millions of effects.
Estimates models of the form
.. math::
y_i = x_i \beta + z_i \gamma + \epsilon_i
where :math:`\beta` are parameters of interest and :math:`\gamma`
    are not. z may be high-dimensional, although it must have fewer
variables than the number of observations in y.
The syntax simplifies specifying high-dimensional z when z consists
of categorical (factor) variables, also known as effects, or when
z contains interactions between continuous variables and categorical
variables, also known as fixed slopes.
The high-dimensional effects are fit using LSMR which avoids inverting
or even constructing the inner product of the regressors. This is
    combined with Frisch-Waugh-Lovell to orthogonalize x and y from z.
z can contain factors that are perfectly linearly dependent. LSMR
estimates a particular restricted set of parameters that captures the
effect of non-redundant components in z.
See also
--------
Interaction
linearmodels.iv.model.IVLIML
linearmodels.iv.model.IV2SLS
scipy.sparse.linalg.lsmr
Examples
--------
Estimate a model by absorbing 2 categoricals and 2 continuous variables
>>> import numpy as np
>>> import pandas as pd
>>> from linearmodels.iv import AbsorbingLS, Interaction
>>> dep = np.random.standard_normal((20000,1))
>>> exog = np.random.standard_normal((20000,2))
>>> cats = pd.DataFrame({i: pd.Categorical(np.random.randint(1000, size=20000))
... for i in range(2)})
>>> cont = pd.DataFrame({i+2: np.random.standard_normal(20000) for i in range(2)})
>>> absorb = pd.concat([cats, cont], axis=1)
>>> mod = AbsorbingLS(dep, exog, absorb=absorb)
>>> res = mod.fit()
    Add interactions between the Cartesian product of the categorical and
    each continuous variable
>>> iaction = Interaction(cat=cats, cont=cont)
>>> absorb = Interaction(cat=cats) # Other encoding of categoricals
>>> mod = AbsorbingLS(dep, exog, absorb=absorb, interactions=iaction)
"""
def __init__(
self,
dependent: ArrayLike,
exog: ArrayLike | None = None,
*,
absorb: InteractionVar | None = None,
interactions: InteractionVar | Iterable[InteractionVar] | None = None,
weights: ArrayLike | None = None,
drop_absorbed: bool = False,
) -> None:
self._dependent = IVData(dependent, "dependent")
self._nobs = nobs = self._dependent.shape[0]
self._exog = IVData(exog, "exog", nobs=self._nobs)
self._absorb = absorb
if isinstance(absorb, DataFrame):
self._absorb_inter = Interaction.from_frame(absorb)
elif absorb is None:
self._absorb_inter = Interaction(None, None, nobs)
elif isinstance(absorb, Interaction):
self._absorb_inter = absorb
else:
raise TypeError("absorb must ba a DataFrame or an Interaction")
self._weights = weights
self._is_weighted = False
self._drop_absorbed = drop_absorbed
self._check_weights()
self._interactions = interactions
self._interaction_list: list[Interaction] = []
self._prepare_interactions()
self._absorbed_dependent: DataFrame | None = None
self._absorbed_exog: DataFrame | None = None
self._check_shape()
self._original_index = self._dependent.pandas.index
self._drop_locs = self._drop_missing()
self._columns = self._exog.cols
self._index = self._dependent.rows
self._method = "Absorbing LS"
self._const_col = 0
self._has_constant = False
self._has_constant_exog = self._check_constant()
self._constant_absorbed = False
self._num_params = 0
self._regressors: sp.csc_matrix | None = None
self._regressors_hash: tuple[tuple[str, ...], ...] | None = None
def _drop_missing(self) -> BoolArray:
missing = require(self.dependent.isnull.to_numpy(), requirements="W")
missing |= self.exog.isnull.to_numpy()
missing |= self._absorb_inter.cat.isnull().any(axis=1).to_numpy()
missing |= self._absorb_inter.cont.isnull().any(axis=1).to_numpy()
for interact in self._interaction_list:
missing |= interact.isnull.to_numpy()
if npany(missing):
self.dependent.drop(missing)
self.exog.drop(missing)
self._absorb_inter.drop(missing)
for interact in self._interaction_list:
interact.drop(missing)
missing_warning(missing, stacklevel=4)
return missing
def _check_constant(self) -> bool:
col_delta = ptp(self.exog.ndarray, 0)
has_constant = npany(col_delta == 0)
self._const_col = where(col_delta == 0)[0][0] if has_constant else None
return bool(has_constant)
def _check_weights(self) -> None:
if self._weights is None:
nobs = self._dependent.shape[0]
self._is_weighted = False
self._weight_data = IVData(ones(nobs), "weights")
else:
self._is_weighted = True
weights = IVData(self._weights).ndarray
weights = weights / nanmean(weights)
self._weight_data = IVData(weights, var_name="weights", nobs=self._nobs)
def _check_shape(self) -> None:
nobs = self._nobs
if self._absorb is not None:
if self._absorb_inter.nobs != nobs:
raise ValueError(
"absorb and dependent have different number of observations"
)
for interact in self._interaction_list:
if interact.nobs != nobs:
raise ValueError(
"interactions ({}) and dependent have different number of "
"observations".format(str(interact))
)
@property
def absorbed_dependent(self) -> DataFrame:
"""
Dependent variable with effects absorbed
Returns
-------
DataFrame
Dependent after effects have been absorbed
Raises
------
RuntimeError
If called before `fit` has been used once
"""
if self._absorbed_dependent is not None:
return self._absorbed_dependent
raise RuntimeError(
"fit must be called once before absorbed_dependent is available"
)
@property
def absorbed_exog(self) -> DataFrame:
"""
Exogenous variables with effects absorbed
Returns
-------
DataFrame
Exogenous after effects have been absorbed
Raises
------
RuntimeError
If called before `fit` has been used once
"""
if self._absorbed_exog is not None:
return self._absorbed_exog
raise RuntimeError("fit must be called once before absorbed_exog is available")
@property
def weights(self) -> IVData:
return self._weight_data
@property
def dependent(self) -> IVData:
return self._dependent
@property
def exog(self) -> IVData:
return self._exog
@property
def has_constant(self) -> bool:
return self._has_constant
@property
def instruments(self) -> IVData:
return IVData(None, "instrument", nobs=self._dependent.shape[0])
def _prepare_interactions(self) -> None:
if self._interactions is None:
return
elif isinstance(self._interactions, DataFrame):
self._interaction_list = [Interaction.from_frame(self._interactions)]
elif isinstance(self._interactions, Interaction):
self._interaction_list = [self._interactions]
else:
for interact in self._interactions:
if isinstance(interact, DataFrame):
self._interaction_list.append(Interaction.from_frame(interact))
elif isinstance(interact, Interaction):
self._interaction_list.append(interact)
else:
raise TypeError(
"interactions must contain DataFrames or Interactions"
)
def _first_time_fit(
self,
use_cache: bool,
absorb_options: None | (
dict[str, bool | float | str | ArrayLike | None | dict[str, Any]]
),
method: str,
) -> None:
weights = (
cast(Float64Array, self.weights.ndarray) if self._is_weighted else None
)
use_hdfe = weights is None and method in ("auto", "hdfe")
use_hdfe = use_hdfe and not self._absorb_inter.cont.shape[1]
use_hdfe = use_hdfe and not self._interaction_list
if not use_hdfe and method == "hdfe":
raise RuntimeError(
"HDFE has been set as the method but the model cannot be estimated "
"using HDFE. HDFE requires that the model is unweighted and that the "
"absorbed regressors include only fixed effects (dummy variables)."
)
areg = AbsorbingRegressor(
cat=self._absorb_inter.cat,
cont=self._absorb_inter.cont,
interactions=self._interaction_list,
weights=weights,
)
areg_constant = areg.has_constant
self._regressors = areg.regressors
self._num_params += areg.approx_rank
# Do not double count intercept-like terms
self._has_constant = self._has_constant_exog or areg_constant
self._num_params -= min(self._has_constant_exog, areg_constant)
self._regressors_hash = areg.hash
self._constant_absorbed = self._has_constant_exog and areg_constant
dep = self._dependent.ndarray
exog = cast(Float64Array, self._exog.ndarray)
root_w = sqrt(self._weight_data.ndarray)
dep = root_w * dep
exog = root_w * exog
denom = root_w.T @ root_w
mu_dep = (root_w.T @ dep) / denom
mu_exog = (root_w.T @ exog) / denom
absorb_options = {} if absorb_options is None else absorb_options
assert isinstance(self._regressors, sp.csc_matrix)
if self._regressors.shape[1] > 0:
if use_hdfe:
from pyhdfe import create
absorb_options["drop_singletons"] = False
algo = create(self._absorb_inter.cat, **absorb_options)
dep_exog = column_stack((dep, exog))
resids = algo.residualize(dep_exog)
dep_resid = resids[:, :1]
exog_resid = resids[:, 1:]
else:
self._regressors = preconditioner(self._regressors)[0]
dep_exog = column_stack((dep, exog))
resid = lsmr_annihilate(
self._regressors,
dep_exog,
use_cache,
self._regressors_hash,
**absorb_options,
)
dep_resid = resid[:, :1]
exog_resid = resid[:, 1:]
else:
dep_resid = dep
exog_resid = exog
if self._constant_absorbed:
dep_resid += root_w * mu_dep
exog_resid += root_w * mu_exog
if not self._drop_absorbed:
check_absorbed(exog_resid, self.exog.cols, exog)
else:
ncol = exog_resid.shape[1]
retain = not_absorbed(exog_resid)
if not retain:
raise ValueError(
"All columns in exog have been fully absorbed by the "
"included effects. This model cannot be estimated."
)
elif len(retain) < ncol:
drop = set(range(ncol)).difference(retain)
dropped = ", ".join([str(self.exog.cols[i]) for i in drop])
warnings.warn(
absorbing_warn_msg.format(absorbed_variables=dropped),
AbsorbingEffectWarning,
stacklevel=3,
)
exog_resid = exog_resid[:, retain]
self._columns = [self._columns[i] for i in retain]
self._absorbed_dependent = DataFrame(
dep_resid,
index=self._dependent.pandas.index,
columns=self._dependent.pandas.columns,
)
self._absorbed_exog = DataFrame(
exog_resid, index=self._exog.pandas.index, columns=self._columns
)
def fit(
self,
*,
cov_type: str = "robust",
debiased: bool = False,
method: str = "auto",
absorb_options: None | (
dict[str, bool | float | str | ArrayLike | None | dict[str, Any]]
) = None,
use_cache: bool = True,
lsmr_options: dict[str, float | bool] | None = None,
**cov_config: Any,
) -> AbsorbingLSResults:
"""
Estimate model parameters
Parameters
----------
cov_type : str
Name of covariance estimator to use. Supported covariance
estimators are:
* "unadjusted", "homoskedastic" - Classic homoskedastic inference
* "robust", "heteroskedastic" - Heteroskedasticity robust inference
* "kernel" - Heteroskedasticity and autocorrelation robust
inference
* "cluster" - One-way cluster dependent inference.
Heteroskedasticity robust
debiased : bool
            Flag indicating whether to debias the covariance estimator using
a degree of freedom adjustment.
method : str
One of:
* "auto" - (Default). Use HDFE when applicable and fallback to LSMR.
* "lsmr" - Force LSMR.
* "hdfe" - Force HDFE. Raises RuntimeError if the model contains
continuous variables or continuous-binary interactions to absorb or
if the model is weighted.
absorb_options : dict
Dictionary of options to pass to the absorber. Passed to either
scipy.sparse.linalg.lsmr or pyhdfe.create depending on the method used
to absorb the absorbed regressors.
use_cache : bool
Flag indicating whether the variables, once purged from the
absorbed variables and interactions, should be stored in the cache,
and retrieved if available. Cache can dramatically speed up
re-fitting large models when the set of absorbed variables and
interactions are identical.
lsmr_options : dict
            Options to pass to scipy.sparse.linalg.lsmr.
.. deprecated:: 4.17
Use absorb_options to pass options
**cov_config
            Additional parameters to pass to the covariance estimator. The list
            of optional parameters differs according to ``cov_type``. See
            the documentation of the alternative covariance estimators for
            the complete list of available options.
Returns
-------
AbsorbingLSResults
Results container
Notes
-----
        Additional covariance parameters depend on the specific covariance estimator used.
        See the docstring of the specific covariance estimator for a list of
supported options. Defaults are used if no covariance configuration
is provided.
If use_cache is True, then variables are hashed based on their
contents using either a 64-bit value (if xxhash is installed) or
a 256-bit value. This allows variables to be reused in different
models if the set of absorbing variables and interactions is held
constant.
See also
--------
linearmodels.iv.covariance.HomoskedasticCovariance
linearmodels.iv.covariance.HeteroskedasticCovariance
linearmodels.iv.covariance.KernelCovariance
linearmodels.iv.covariance.ClusteredCovariance
"""
if lsmr_options is not None:
if absorb_options is not None:
raise ValueError("absorb_options cannot be used with lsmr_options")
warnings.warn(
"lsmr_options is deprecated. Use absorb_options.",
FutureWarning,
stacklevel=2,
)
absorb_options = {k: v for k, v in lsmr_options.items()}
if self._absorbed_dependent is None:
self._first_time_fit(use_cache, absorb_options, method)
exog_resid = self.absorbed_exog.to_numpy()
dep_resid = self.absorbed_dependent.to_numpy()
if self._exog.shape[1] == 0:
params = empty((0, 1))
else:
params = lstsq(exog_resid, dep_resid, rcond=None)[0]
self._num_params += exog_resid.shape[1]
cov_estimator = COVARIANCE_ESTIMATORS[cov_type]
cov_config["debiased"] = debiased
cov_config["kappa"] = 0.0
cov_config_copy = {k: v for k, v in cov_config.items()}
if "center" in cov_config_copy:
del cov_config_copy["center"]
cov_estimator_inst = cov_estimator(
exog_resid, dep_resid, exog_resid, params, **cov_config_copy
)
results = {"kappa": 0.0, "liml_kappa": 0.0}
pe = self._post_estimation(params, cov_estimator_inst, cov_type)
results.update(pe)
results["df_model"] = self._num_params
return AbsorbingLSResults(results, self)
def resids(self, params: Float64Array) -> Float64Array:
"""
Compute model residuals
Parameters
----------
params : ndarray
Model parameters (nvar by 1)
Returns
-------
ndarray
Model residuals
"""
resids = self.wresids(params)
return resids / sqrt(self.weights.ndarray)
def wresids(self, params: Float64Array) -> Float64Array:
"""
Compute weighted model residuals
Parameters
----------
params : ndarray
Model parameters (nvar by 1)
Returns
-------
ndarray
Weighted model residuals
Notes
-----
Uses weighted versions of data instead of raw data. Identical to
resids if all weights are unity.
"""
assert isinstance(self._absorbed_dependent, DataFrame)
assert isinstance(self._absorbed_exog, DataFrame)
return (
self._absorbed_dependent.to_numpy()
- self._absorbed_exog.to_numpy() @ params
)
def _f_statistic(
self, params: Float64Array, cov: Float64Array, debiased: bool
) -> WaldTestStatistic | InvalidTestStatistic:
const_loc = find_constant(cast(Float64Array, self._exog.ndarray))
resid_df = self._nobs - self._num_params
return f_statistic(params, cov, debiased, resid_df, const_loc)
def _post_estimation(
self,
params: Float64Array,
cov_estimator: (
HomoskedasticCovariance
| HeteroskedasticCovariance
| KernelCovariance
| ClusteredCovariance
),
cov_type: str,
) -> dict[str, Any]:
columns = self._columns
index = self._index
eps = self.resids(params)
fitted_values = self._dependent.ndarray - eps
fitted = DataFrameWrapper(
fitted_values,
index=self._dependent.rows,
columns=["fitted_values"],
)
assert isinstance(self._absorbed_dependent, DataFrame)
absorbed_effects = DataFrameWrapper(
self._absorbed_dependent.to_numpy() - fitted_values,
columns=["absorbed_effects"],
index=self._dependent.rows,
)
weps = self.wresids(params)
cov = cov_estimator.cov
debiased = cov_estimator.debiased
residual_ss = (weps.T @ weps)[0, 0]
w = self.weights.ndarray
root_w = sqrt(w)
e = self._dependent.ndarray * root_w
if self.has_constant:
e = e - root_w * average(self._dependent.ndarray, weights=w)
total_ss = float(squeeze(e.T @ e))
r2 = max(1 - residual_ss / total_ss, 0.0)
e = self._absorbed_dependent.to_numpy() # already scaled by root_w
# If absorbing contains a constant, but exog does not, no need to demean
assert isinstance(self._absorbed_exog, DataFrame)
if self._const_col is not None:
col = self._const_col
x = self._absorbed_exog.to_numpy()[:, col : col + 1]
mu = (lstsq(x, e, rcond=None)[0]).squeeze()
e = e - x * mu
        absorbed_total_ss = float(squeeze(e.T @ e))
        r2_absorbed = max(1 - residual_ss / absorbed_total_ss, 0.0)
fstat = self._f_statistic(params, cov, debiased)
out = {
"params": Series(params.squeeze(), columns, name="parameter"),
"eps": SeriesWrapper(eps.squeeze(), index=index, name="residual"),
"weps": SeriesWrapper(
weps.squeeze(), index=index, name="weighted residual"
),
"cov": DataFrame(cov, columns=columns, index=columns),
"s2": float(squeeze(cov_estimator.s2)),
"debiased": debiased,
"residual_ss": float(residual_ss),
"total_ss": float(total_ss),
"r2": float(r2),
"fstat": fstat,
"vars": columns,
"instruments": [],
"cov_config": cov_estimator.config,
"cov_type": cov_type,
"method": self._method,
"cov_estimator": cov_estimator,
"fitted": fitted,
"original_index": self._original_index,
"absorbed_effects": absorbed_effects,
"absorbed_r2": r2_absorbed,
}
return out
|
(dependent: 'ArrayLike', exog: 'ArrayLike | None' = None, *, absorb: 'InteractionVar | None' = None, interactions: 'InteractionVar | Iterable[InteractionVar] | None' = None, weights: 'ArrayLike | None' = None, drop_absorbed: 'bool' = False) -> 'None'
|
42,806 |
linearmodels.iv.absorbing
|
__init__
| null |
def __init__(
self,
dependent: ArrayLike,
exog: ArrayLike | None = None,
*,
absorb: InteractionVar | None = None,
interactions: InteractionVar | Iterable[InteractionVar] | None = None,
weights: ArrayLike | None = None,
drop_absorbed: bool = False,
) -> None:
self._dependent = IVData(dependent, "dependent")
self._nobs = nobs = self._dependent.shape[0]
self._exog = IVData(exog, "exog", nobs=self._nobs)
self._absorb = absorb
if isinstance(absorb, DataFrame):
self._absorb_inter = Interaction.from_frame(absorb)
elif absorb is None:
self._absorb_inter = Interaction(None, None, nobs)
elif isinstance(absorb, Interaction):
self._absorb_inter = absorb
else:
raise TypeError("absorb must ba a DataFrame or an Interaction")
self._weights = weights
self._is_weighted = False
self._drop_absorbed = drop_absorbed
self._check_weights()
self._interactions = interactions
self._interaction_list: list[Interaction] = []
self._prepare_interactions()
self._absorbed_dependent: DataFrame | None = None
self._absorbed_exog: DataFrame | None = None
self._check_shape()
self._original_index = self._dependent.pandas.index
self._drop_locs = self._drop_missing()
self._columns = self._exog.cols
self._index = self._dependent.rows
self._method = "Absorbing LS"
self._const_col = 0
self._has_constant = False
self._has_constant_exog = self._check_constant()
self._constant_absorbed = False
self._num_params = 0
self._regressors: sp.csc_matrix | None = None
self._regressors_hash: tuple[tuple[str, ...], ...] | None = None
|
(self, dependent: Union[numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series], exog: Union[numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType] = None, *, absorb: Union[pandas.core.frame.DataFrame, linearmodels.iv.absorbing.Interaction, NoneType] = None, interactions: Union[pandas.core.frame.DataFrame, linearmodels.iv.absorbing.Interaction, collections.abc.Iterable[Union[pandas.core.frame.DataFrame, linearmodels.iv.absorbing.Interaction]], NoneType] = None, weights: Union[numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType] = None, drop_absorbed: bool = False) -> NoneType
|
42,807 |
linearmodels.iv.absorbing
|
_check_constant
| null |
def _check_constant(self) -> bool:
col_delta = ptp(self.exog.ndarray, 0)
has_constant = npany(col_delta == 0)
self._const_col = where(col_delta == 0)[0][0] if has_constant else None
return bool(has_constant)
|
(self) -> bool
|
42,808 |
linearmodels.iv.absorbing
|
_check_shape
| null |
def _check_shape(self) -> None:
nobs = self._nobs
if self._absorb is not None:
if self._absorb_inter.nobs != nobs:
raise ValueError(
"absorb and dependent have different number of observations"
)
for interact in self._interaction_list:
if interact.nobs != nobs:
raise ValueError(
"interactions ({}) and dependent have different number of "
"observations".format(str(interact))
)
|
(self) -> NoneType
|
42,809 |
linearmodels.iv.absorbing
|
_check_weights
| null |
def _check_weights(self) -> None:
if self._weights is None:
nobs = self._dependent.shape[0]
self._is_weighted = False
self._weight_data = IVData(ones(nobs), "weights")
else:
self._is_weighted = True
weights = IVData(self._weights).ndarray
weights = weights / nanmean(weights)
self._weight_data = IVData(weights, var_name="weights", nobs=self._nobs)
|
(self) -> NoneType
|
42,810 |
linearmodels.iv.absorbing
|
_drop_missing
| null |
def _drop_missing(self) -> BoolArray:
missing = require(self.dependent.isnull.to_numpy(), requirements="W")
missing |= self.exog.isnull.to_numpy()
missing |= self._absorb_inter.cat.isnull().any(axis=1).to_numpy()
missing |= self._absorb_inter.cont.isnull().any(axis=1).to_numpy()
for interact in self._interaction_list:
missing |= interact.isnull.to_numpy()
if npany(missing):
self.dependent.drop(missing)
self.exog.drop(missing)
self._absorb_inter.drop(missing)
for interact in self._interaction_list:
interact.drop(missing)
missing_warning(missing, stacklevel=4)
return missing
|
(self) -> numpy.ndarray
|
42,811 |
linearmodels.iv.absorbing
|
_f_statistic
| null |
def _f_statistic(
self, params: Float64Array, cov: Float64Array, debiased: bool
) -> WaldTestStatistic | InvalidTestStatistic:
const_loc = find_constant(cast(Float64Array, self._exog.ndarray))
resid_df = self._nobs - self._num_params
return f_statistic(params, cov, debiased, resid_df, const_loc)
|
(self, params: numpy.ndarray, cov: numpy.ndarray, debiased: bool) -> linearmodels.shared.hypotheses.WaldTestStatistic | linearmodels.shared.hypotheses.InvalidTestStatistic
|
42,812 |
linearmodels.iv.absorbing
|
_first_time_fit
| null |
def _first_time_fit(
self,
use_cache: bool,
absorb_options: None | (
dict[str, bool | float | str | ArrayLike | None | dict[str, Any]]
),
method: str,
) -> None:
weights = (
cast(Float64Array, self.weights.ndarray) if self._is_weighted else None
)
use_hdfe = weights is None and method in ("auto", "hdfe")
use_hdfe = use_hdfe and not self._absorb_inter.cont.shape[1]
use_hdfe = use_hdfe and not self._interaction_list
if not use_hdfe and method == "hdfe":
raise RuntimeError(
"HDFE has been set as the method but the model cannot be estimated "
"using HDFE. HDFE requires that the model is unweighted and that the "
"absorbed regressors include only fixed effects (dummy variables)."
)
areg = AbsorbingRegressor(
cat=self._absorb_inter.cat,
cont=self._absorb_inter.cont,
interactions=self._interaction_list,
weights=weights,
)
areg_constant = areg.has_constant
self._regressors = areg.regressors
self._num_params += areg.approx_rank
# Do not double count intercept-like terms
self._has_constant = self._has_constant_exog or areg_constant
self._num_params -= min(self._has_constant_exog, areg_constant)
self._regressors_hash = areg.hash
self._constant_absorbed = self._has_constant_exog and areg_constant
dep = self._dependent.ndarray
exog = cast(Float64Array, self._exog.ndarray)
root_w = sqrt(self._weight_data.ndarray)
dep = root_w * dep
exog = root_w * exog
denom = root_w.T @ root_w
mu_dep = (root_w.T @ dep) / denom
mu_exog = (root_w.T @ exog) / denom
absorb_options = {} if absorb_options is None else absorb_options
assert isinstance(self._regressors, sp.csc_matrix)
if self._regressors.shape[1] > 0:
if use_hdfe:
from pyhdfe import create
absorb_options["drop_singletons"] = False
algo = create(self._absorb_inter.cat, **absorb_options)
dep_exog = column_stack((dep, exog))
resids = algo.residualize(dep_exog)
dep_resid = resids[:, :1]
exog_resid = resids[:, 1:]
else:
self._regressors = preconditioner(self._regressors)[0]
dep_exog = column_stack((dep, exog))
resid = lsmr_annihilate(
self._regressors,
dep_exog,
use_cache,
self._regressors_hash,
**absorb_options,
)
dep_resid = resid[:, :1]
exog_resid = resid[:, 1:]
else:
dep_resid = dep
exog_resid = exog
if self._constant_absorbed:
dep_resid += root_w * mu_dep
exog_resid += root_w * mu_exog
if not self._drop_absorbed:
check_absorbed(exog_resid, self.exog.cols, exog)
else:
ncol = exog_resid.shape[1]
retain = not_absorbed(exog_resid)
if not retain:
raise ValueError(
"All columns in exog have been fully absorbed by the "
"included effects. This model cannot be estimated."
)
elif len(retain) < ncol:
drop = set(range(ncol)).difference(retain)
dropped = ", ".join([str(self.exog.cols[i]) for i in drop])
warnings.warn(
absorbing_warn_msg.format(absorbed_variables=dropped),
AbsorbingEffectWarning,
stacklevel=3,
)
exog_resid = exog_resid[:, retain]
self._columns = [self._columns[i] for i in retain]
self._absorbed_dependent = DataFrame(
dep_resid,
index=self._dependent.pandas.index,
columns=self._dependent.pandas.columns,
)
self._absorbed_exog = DataFrame(
exog_resid, index=self._exog.pandas.index, columns=self._columns
)
|
(self, use_cache: bool, absorb_options: None | dict[str, typing.Union[bool, float, str, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType, dict[str, typing.Any]]], method: str) -> NoneType
|
42,813 |
linearmodels.iv.absorbing
|
_post_estimation
| null |
def _post_estimation(
self,
params: Float64Array,
cov_estimator: (
HomoskedasticCovariance
| HeteroskedasticCovariance
| KernelCovariance
| ClusteredCovariance
),
cov_type: str,
) -> dict[str, Any]:
columns = self._columns
index = self._index
eps = self.resids(params)
fitted_values = self._dependent.ndarray - eps
fitted = DataFrameWrapper(
fitted_values,
index=self._dependent.rows,
columns=["fitted_values"],
)
assert isinstance(self._absorbed_dependent, DataFrame)
absorbed_effects = DataFrameWrapper(
self._absorbed_dependent.to_numpy() - fitted_values,
columns=["absorbed_effects"],
index=self._dependent.rows,
)
weps = self.wresids(params)
cov = cov_estimator.cov
debiased = cov_estimator.debiased
residual_ss = (weps.T @ weps)[0, 0]
w = self.weights.ndarray
root_w = sqrt(w)
e = self._dependent.ndarray * root_w
if self.has_constant:
e = e - root_w * average(self._dependent.ndarray, weights=w)
total_ss = float(squeeze(e.T @ e))
r2 = max(1 - residual_ss / total_ss, 0.0)
e = self._absorbed_dependent.to_numpy() # already scaled by root_w
# If absorbing contains a constant, but exog does not, no need to demean
assert isinstance(self._absorbed_exog, DataFrame)
if self._const_col is not None:
col = self._const_col
x = self._absorbed_exog.to_numpy()[:, col : col + 1]
mu = (lstsq(x, e, rcond=None)[0]).squeeze()
e = e - x * mu
    absorbed_total_ss = float(squeeze(e.T @ e))
    r2_absorbed = max(1 - residual_ss / absorbed_total_ss, 0.0)
fstat = self._f_statistic(params, cov, debiased)
out = {
"params": Series(params.squeeze(), columns, name="parameter"),
"eps": SeriesWrapper(eps.squeeze(), index=index, name="residual"),
"weps": SeriesWrapper(
weps.squeeze(), index=index, name="weighted residual"
),
"cov": DataFrame(cov, columns=columns, index=columns),
"s2": float(squeeze(cov_estimator.s2)),
"debiased": debiased,
"residual_ss": float(residual_ss),
"total_ss": float(total_ss),
"r2": float(r2),
"fstat": fstat,
"vars": columns,
"instruments": [],
"cov_config": cov_estimator.config,
"cov_type": cov_type,
"method": self._method,
"cov_estimator": cov_estimator,
"fitted": fitted,
"original_index": self._original_index,
"absorbed_effects": absorbed_effects,
"absorbed_r2": r2_absorbed,
}
return out
|
(self, params: numpy.ndarray, cov_estimator: linearmodels.iv.covariance.HomoskedasticCovariance | linearmodels.iv.covariance.HeteroskedasticCovariance | linearmodels.iv.covariance.KernelCovariance | linearmodels.iv.covariance.ClusteredCovariance, cov_type: str) -> dict[str, typing.Any]
|
42,814 |
linearmodels.iv.absorbing
|
_prepare_interactions
| null |
def _prepare_interactions(self) -> None:
if self._interactions is None:
return
elif isinstance(self._interactions, DataFrame):
self._interaction_list = [Interaction.from_frame(self._interactions)]
elif isinstance(self._interactions, Interaction):
self._interaction_list = [self._interactions]
else:
for interact in self._interactions:
if isinstance(interact, DataFrame):
self._interaction_list.append(Interaction.from_frame(interact))
elif isinstance(interact, Interaction):
self._interaction_list.append(interact)
else:
raise TypeError(
"interactions must contain DataFrames or Interactions"
)
|
(self) -> NoneType
|
42,815 |
linearmodels.iv.absorbing
|
fit
|
Estimate model parameters
Parameters
----------
cov_type : str
Name of covariance estimator to use. Supported covariance
estimators are:
* "unadjusted", "homoskedastic" - Classic homoskedastic inference
* "robust", "heteroskedastic" - Heteroskedasticity robust inference
* "kernel" - Heteroskedasticity and autocorrelation robust
inference
* "cluster" - One-way cluster dependent inference.
Heteroskedasticity robust
debiased : bool
    Flag indicating whether to debias the covariance estimator using
a degree of freedom adjustment.
method : str
One of:
* "auto" - (Default). Use HDFE when applicable and fallback to LSMR.
* "lsmr" - Force LSMR.
* "hdfe" - Force HDFE. Raises RuntimeError if the model contains
continuous variables or continuous-binary interactions to absorb or
if the model is weighted.
absorb_options : dict
Dictionary of options to pass to the absorber. Passed to either
scipy.sparse.linalg.lsmr or pyhdfe.create depending on the method used
to absorb the absorbed regressors.
use_cache : bool
Flag indicating whether the variables, once purged from the
absorbed variables and interactions, should be stored in the cache,
and retrieved if available. Cache can dramatically speed up
re-fitting large models when the set of absorbed variables and
interactions are identical.
lsmr_options : dict
    Options to pass to scipy.sparse.linalg.lsmr.
.. deprecated:: 4.17
Use absorb_options to pass options
**cov_config
    Additional parameters to pass to the covariance estimator. The list
    of optional parameters differs according to ``cov_type``. See
    the documentation of the alternative covariance estimators for
    the complete list of available options.
Returns
-------
AbsorbingLSResults
Results container
Notes
-----
Additional covariance parameters depend on the specific covariance estimator used.
See the docstring of the specific covariance estimator for a list of
supported options. Defaults are used if no covariance configuration
is provided.
If use_cache is True, then variables are hashed based on their
contents using either a 64-bit value (if xxhash is installed) or
a 256-bit value. This allows variables to be reused in different
models if the set of absorbing variables and interactions is held
constant.
See also
--------
linearmodels.iv.covariance.HomoskedasticCovariance
linearmodels.iv.covariance.HeteroskedasticCovariance
linearmodels.iv.covariance.KernelCovariance
linearmodels.iv.covariance.ClusteredCovariance
|
def fit(
self,
*,
cov_type: str = "robust",
debiased: bool = False,
method: str = "auto",
absorb_options: None | (
dict[str, bool | float | str | ArrayLike | None | dict[str, Any]]
) = None,
use_cache: bool = True,
lsmr_options: dict[str, float | bool] | None = None,
**cov_config: Any,
) -> AbsorbingLSResults:
"""
Estimate model parameters
Parameters
----------
cov_type : str
Name of covariance estimator to use. Supported covariance
estimators are:
* "unadjusted", "homoskedastic" - Classic homoskedastic inference
* "robust", "heteroskedastic" - Heteroskedasticity robust inference
* "kernel" - Heteroskedasticity and autocorrelation robust
inference
* "cluster" - One-way cluster dependent inference.
Heteroskedasticity robust
debiased : bool
        Flag indicating whether to debias the covariance estimator using
a degree of freedom adjustment.
method : str
One of:
* "auto" - (Default). Use HDFE when applicable and fallback to LSMR.
* "lsmr" - Force LSMR.
* "hdfe" - Force HDFE. Raises RuntimeError if the model contains
continuous variables or continuous-binary interactions to absorb or
if the model is weighted.
absorb_options : dict
Dictionary of options to pass to the absorber. Passed to either
scipy.sparse.linalg.lsmr or pyhdfe.create depending on the method used
to absorb the absorbed regressors.
use_cache : bool
Flag indicating whether the variables, once purged from the
absorbed variables and interactions, should be stored in the cache,
and retrieved if available. Cache can dramatically speed up
re-fitting large models when the set of absorbed variables and
interactions are identical.
lsmr_options : dict
        Options to pass to scipy.sparse.linalg.lsmr.
.. deprecated:: 4.17
Use absorb_options to pass options
**cov_config
        Additional parameters to pass to the covariance estimator. The list
        of optional parameters differs according to ``cov_type``. See
        the documentation of the alternative covariance estimators for
        the complete list of available options.
Returns
-------
AbsorbingLSResults
Results container
Notes
-----
    Additional covariance parameters depend on the specific covariance estimator used.
    See the docstring of the specific covariance estimator for a list of
supported options. Defaults are used if no covariance configuration
is provided.
If use_cache is True, then variables are hashed based on their
contents using either a 64-bit value (if xxhash is installed) or
a 256-bit value. This allows variables to be reused in different
models if the set of absorbing variables and interactions is held
constant.
See also
--------
linearmodels.iv.covariance.HomoskedasticCovariance
linearmodels.iv.covariance.HeteroskedasticCovariance
linearmodels.iv.covariance.KernelCovariance
linearmodels.iv.covariance.ClusteredCovariance
"""
if lsmr_options is not None:
if absorb_options is not None:
raise ValueError("absorb_options cannot be used with lsmr_options")
warnings.warn(
"lsmr_options is deprecated. Use absorb_options.",
FutureWarning,
stacklevel=2,
)
absorb_options = {k: v for k, v in lsmr_options.items()}
if self._absorbed_dependent is None:
self._first_time_fit(use_cache, absorb_options, method)
exog_resid = self.absorbed_exog.to_numpy()
dep_resid = self.absorbed_dependent.to_numpy()
if self._exog.shape[1] == 0:
params = empty((0, 1))
else:
params = lstsq(exog_resid, dep_resid, rcond=None)[0]
self._num_params += exog_resid.shape[1]
cov_estimator = COVARIANCE_ESTIMATORS[cov_type]
cov_config["debiased"] = debiased
cov_config["kappa"] = 0.0
cov_config_copy = {k: v for k, v in cov_config.items()}
if "center" in cov_config_copy:
del cov_config_copy["center"]
cov_estimator_inst = cov_estimator(
exog_resid, dep_resid, exog_resid, params, **cov_config_copy
)
results = {"kappa": 0.0, "liml_kappa": 0.0}
pe = self._post_estimation(params, cov_estimator_inst, cov_type)
results.update(pe)
results["df_model"] = self._num_params
return AbsorbingLSResults(results, self)
|
(self, *, cov_type: str = 'robust', debiased: bool = False, method: str = 'auto', absorb_options: Optional[dict[str, Union[bool, float, str, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType, dict[str, Any]]]] = None, use_cache: bool = True, lsmr_options: Optional[dict[str, float | bool]] = None, **cov_config: Any) -> linearmodels.iv.results.AbsorbingLSResults
|
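A short sketch continuing the construction example from the class docstring (dep, exog and absorb as built there); the result attributes follow the usual linearmodels results API and are stated here as assumptions rather than guarantees:

    mod = AbsorbingLS(dep, exog, absorb=absorb)
    res = mod.fit(cov_type="robust", debiased=True, method="auto")
    print(res.params)                     # coefficients on the two exog columns
    print(mod.absorbed_dependent.head())  # dependent variable after the effects are partialled out
    print(mod.absorbed_exog.head())       # exogenous regressors after the effects are partialled out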
42,816 |
linearmodels.iv.absorbing
|
resids
|
Compute model residuals
Parameters
----------
params : ndarray
Model parameters (nvar by 1)
Returns
-------
ndarray
Model residuals
|
def resids(self, params: Float64Array) -> Float64Array:
"""
Compute model residuals
Parameters
----------
params : ndarray
Model parameters (nvar by 1)
Returns
-------
ndarray
Model residuals
"""
resids = self.wresids(params)
return resids / sqrt(self.weights.ndarray)
|
(self, params: numpy.ndarray) -> numpy.ndarray
|
42,817 |
linearmodels.iv.absorbing
|
wresids
|
Compute weighted model residuals
Parameters
----------
params : ndarray
Model parameters (nvar by 1)
Returns
-------
ndarray
Weighted model residuals
Notes
-----
Uses weighted versions of data instead of raw data. Identical to
resids if all weights are unity.
|
def wresids(self, params: Float64Array) -> Float64Array:
"""
Compute weighted model residuals
Parameters
----------
params : ndarray
Model parameters (nvar by 1)
Returns
-------
ndarray
Weighted model residuals
Notes
-----
Uses weighted versions of data instead of raw data. Identical to
resids if all weights are unity.
"""
assert isinstance(self._absorbed_dependent, DataFrame)
assert isinstance(self._absorbed_exog, DataFrame)
return (
self._absorbed_dependent.to_numpy()
- self._absorbed_exog.to_numpy() @ params
)
|
(self, params: numpy.ndarray) -> numpy.ndarray
|
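The two residual methods differ only by the square root of the weights. A small consistency check, assuming mod is an AbsorbingLS that has already been fit and params is the (nvar, 1) parameter array:

    import numpy as np

    eps = mod.resids(params)    # residuals on the original scale
    weps = mod.wresids(params)  # residuals of the root-weight-scaled data
    assert np.allclose(weps, np.sqrt(mod.weights.ndarray) * eps)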
42,818 |
linearmodels.panel.model
|
BetweenOLS
|
Between estimator for panel data
Parameters
----------
dependent : array_like
Dependent (left-hand-side) variable (time by entity)
exog : array_like
Exogenous or right-hand-side variables (variable by time by entity).
weights : array_like
Weights to use in estimation. Assumes residual variance is
    proportional to the inverse of the weight so that the residual
    times the weight is homoskedastic.
Notes
-----
The model is given by
.. math::
\bar{y}_{i}= \beta^{\prime}\bar{x}_{i}+\bar{\epsilon}_{i}
where :math:`\bar{z}` is the time-average.
|
class BetweenOLS(_PanelModelBase):
r"""
Between estimator for panel data
Parameters
----------
dependent : array_like
Dependent (left-hand-side) variable (time by entity)
exog : array_like
Exogenous or right-hand-side variables (variable by time by entity).
weights : array_like
Weights to use in estimation. Assumes residual variance is
        proportional to the inverse of the weight so that the residual
        times the weight is homoskedastic.
Notes
-----
The model is given by
.. math::
\bar{y}_{i}= \beta^{\prime}\bar{x}_{i}+\bar{\epsilon}_{i}
where :math:`\bar{z}` is the time-average.
"""
def __init__(
self,
dependent: PanelDataLike,
exog: PanelDataLike,
*,
weights: PanelDataLike | None = None,
check_rank: bool = True,
) -> None:
super().__init__(dependent, exog, weights=weights, check_rank=check_rank)
self._cov_estimators = CovarianceManager(
self.__class__.__name__,
HomoskedasticCovariance,
HeteroskedasticCovariance,
ClusteredCovariance,
)
def _setup_clusters(
self,
cov_config: Mapping[str, bool | float | str | IntArray | DataFrame | PanelData],
) -> dict[str, bool | float | str | IntArray | DataFrame | PanelData]:
"""Return covariance estimator reformat clusters"""
cov_config_upd = dict(cov_config)
if "clusters" not in cov_config:
return cov_config_upd
clusters = cov_config.get("clusters", None)
if clusters is not None:
cluster_data = cast(Union[IntArray, DataFrame, PanelData], clusters)
clusters_panel = self.reformat_clusters(cluster_data)
cluster_max = np.nanmax(clusters_panel.values3d, axis=1)
delta = cluster_max - np.nanmin(clusters_panel.values3d, axis=1)
if np.any(delta != 0):
raise ValueError("clusters must not vary within an entity")
index = clusters_panel.panel.minor_axis
reindex = clusters_panel.entities
clusters_frame = DataFrame(
cluster_max.T, index=index, columns=clusters_panel.vars
)
# TODO: Bug in pandas-stubs prevents using Hashable | None
clusters_frame = clusters_frame.loc[reindex].astype(
np.int64
) # type: ignore
cov_config_upd["clusters"] = clusters_frame
return cov_config_upd
def fit(
self,
*,
reweight: bool = False,
cov_type: str = "unadjusted",
debiased: bool = True,
**cov_config: bool | float | str | IntArray | DataFrame | PanelData,
) -> PanelResults:
"""
Estimate model parameters
Parameters
----------
reweight : bool
            Flag indicating whether to reweight observations when the input data is
unbalanced using a WLS estimator. If weights are provided, these
are accounted for when reweighting. Has no effect on balanced data.
cov_type : str
Name of covariance estimator. See Notes.
debiased : bool
            Flag indicating whether to debias the covariance estimator using
a degree of freedom adjustment.
**cov_config
Additional covariance-specific options. See Notes.
Returns
-------
PanelResults
Estimation results
Examples
--------
>>> from linearmodels import BetweenOLS
>>> mod = BetweenOLS(y, x)
>>> res = mod.fit(cov_type='robust')
Notes
-----
Three covariance estimators are supported:
* "unadjusted", "homoskedastic" - Assume residual are homoskedastic
* "robust", "heteroskedastic" - Control for heteroskedasticity using
White's estimator
* "clustered` - One- or two-way clustering. Configuration options are:
* ``clusters`` - Input containing 1 or 2 variables.
Clusters should be integer values, although other types will
          be coerced to integer values by treating them as categorical variables
When using a clustered covariance estimator, all cluster ids must be
identical within an entity.
"""
y, x, w = self._prepare_between()
if np.all(self.weights.values2d == 1.0) and not reweight:
w = root_w = np.ones_like(y)
else:
root_w = cast(Float64Array, np.sqrt(w))
wx = root_w * x
wy = root_w * y
params = _lstsq(wx, wy, rcond=None)[0]
df_resid = y.shape[0] - x.shape[1]
        df_model = x.shape[1]
nobs = y.shape[0]
cov_config = self._setup_clusters(cov_config)
extra_df = 0
if "extra_df" in cov_config:
cov_config = cov_config.copy()
_extra_df = cov_config.pop("extra_df")
assert isinstance(_extra_df, (str, int))
extra_df = int(_extra_df)
cov = setup_covariance_estimator(
self._cov_estimators,
cov_type,
wy,
wx,
params,
self.dependent.entity_ids,
self.dependent.time_ids,
debiased=debiased,
extra_df=extra_df,
**cov_config,
)
weps = wy - wx @ params
index = self.dependent.index
fitted = DataFrame(self.exog.values2d @ params, index, ["fitted_values"])
eps = y - x @ params
effects = DataFrame(eps, self.dependent.entities, ["estimated_effects"])
idx = cast(MultiIndex, fitted.index)
entities = idx.levels[0][idx.codes[0]]
effects = effects.loc[entities]
effects.index = idx
dep = self.dependent.dataframe
fitted = fitted.reindex(dep.index)
effects = effects.reindex(dep.index)
idiosyncratic = DataFrame(
np.asarray(dep) - np.asarray(fitted) - np.asarray(effects),
dep.index,
["idiosyncratic"],
)
residual_ss = float(np.squeeze(weps.T @ weps))
e = y
if self._constant:
e = y - (w * y).sum() / w.sum()
total_ss = float(np.squeeze(w.T @ (e**2)))
r2 = 1 - residual_ss / total_ss
res = self._postestimation(
params, cov, debiased, df_resid, weps, wy, wx, root_w
)
res.update(
dict(
df_resid=df_resid,
df_model=df_model,
nobs=nobs,
residual_ss=residual_ss,
total_ss=total_ss,
r2=r2,
wresids=weps,
resids=eps,
index=self.dependent.entities,
fitted=fitted,
effects=effects,
idiosyncratic=idiosyncratic,
)
)
return PanelResults(res)
@classmethod
def from_formula(
cls,
formula: str,
data: PanelDataLike,
*,
weights: PanelDataLike | None = None,
check_rank: bool = True,
) -> BetweenOLS:
"""
Create a model from a formula
Parameters
----------
formula : str
Formula to transform into model. Conforms to formulaic formula
rules.
data : array_like
Data structure that can be coerced into a PanelData. In most
cases, this should be a multi-index DataFrame where the level 0
index contains the entities and the level 1 contains the time.
weights: array_like
Weights to use in estimation. Assumes residual variance is
proportional to the inverse of the weight so that the residual
times the weight should be homoskedastic.
check_rank : bool
Flag indicating whether to perform a rank check on the exogenous
variables to ensure that the model is identified. Skipping this
check can reduce the time required to validate a model
specification. Results may be numerically unstable if this check
is skipped and the matrix is not full rank.
Returns
-------
BetweenOLS
Model specified using the formula
Notes
-----
Unlike standard formula syntax, it is necessary to explicitly include
a constant using the constant indicator (1)
Examples
--------
>>> from linearmodels import BetweenOLS
>>> from linearmodels.panel import generate_panel_data
>>> panel_data = generate_panel_data()
>>> mod = BetweenOLS.from_formula("y ~ 1 + x1", panel_data.data)
>>> res = mod.fit()
"""
parser = PanelFormulaParser(formula, data, context=capture_context(1))
dependent, exog = parser.data
mod = cls(dependent, exog, weights=weights, check_rank=check_rank)
mod.formula = formula
return mod
|
(dependent: 'PanelDataLike', exog: 'PanelDataLike', *, weights: 'PanelDataLike | None' = None, check_rank: 'bool' = True) -> 'None'
|
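To make the between transformation described above concrete, here is a minimal sketch (illustrative only, not library code; all names and the synthetic data are assumptions) that reproduces the idea by hand: compute entity means of a hypothetical panel and run OLS on those means.
import numpy as np
import pandas as pd

# Hypothetical balanced panel: 10 entities observed over 5 periods
rng = np.random.default_rng(0)
entities = np.repeat(np.arange(10), 5)
time = np.tile(np.arange(5), 10)
x = rng.standard_normal(50)
y = 1.0 + 0.5 * x + rng.standard_normal(50)
df = pd.DataFrame({"y": y, "x": x},
                  index=pd.MultiIndex.from_arrays([entities, time]))

# Time-average within each entity (level 0 of the MultiIndex), then OLS on the means
means = df.groupby(level=0).mean()
X = np.column_stack([np.ones(len(means)), means["x"].to_numpy()])
beta_between, *_ = np.linalg.lstsq(X, means["y"].to_numpy(), rcond=None)
This mirrors what `_prepare_between` and `fit` do above, aside from weighting and covariance estimation.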
42,819 |
linearmodels.panel.model
|
__init__
| null |
def __init__(
self,
dependent: PanelDataLike,
exog: PanelDataLike,
*,
weights: PanelDataLike | None = None,
check_rank: bool = True,
) -> None:
super().__init__(dependent, exog, weights=weights, check_rank=check_rank)
self._cov_estimators = CovarianceManager(
self.__class__.__name__,
HomoskedasticCovariance,
HeteroskedasticCovariance,
ClusteredCovariance,
)
|
(self, dependent: Union[linearmodels.panel.data.PanelData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series], exog: Union[linearmodels.panel.data.PanelData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series], *, weights: Union[linearmodels.panel.data.PanelData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType] = None, check_rank: bool = True) -> NoneType
|
42,820 |
linearmodels.panel.model
|
__repr__
| null |
def __repr__(self) -> str:
return self.__str__() + "\nid: " + str(hex(id(self)))
|
(self) -> str
|
42,821 |
linearmodels.panel.model
|
__str__
| null |
def __str__(self) -> str:
out = "{name} \nNum exog: {num_exog}, Constant: {has_constant}"
return out.format(
name=self.__class__.__name__,
num_exog=self.exog.dataframe.shape[1],
has_constant=self.has_constant,
)
|
(self) -> str
|
42,822 |
linearmodels.panel.model
|
_adapt_weights
|
Check and transform weights depending on size
|
def _adapt_weights(self, weights: PanelDataLike | None) -> PanelData:
"""Check and transform weights depending on size"""
if weights is None:
self._is_weighted = False
frame = self.dependent.dataframe.copy()
frame.iloc[:, :] = 1
# TODO: Remove once pandas typing fixed
frame.columns = Index(["weight"])
return PanelData(frame)
frame = DataFrame(columns=self.dependent.entities, index=self.dependent.time)
nobs, nentity = self.exog.nobs, self.exog.nentity
if weights.ndim == 3 or weights.shape == (nobs, nentity):
return PanelData(weights)
if isinstance(weights, np.ndarray):
weights = cast(Float64Array, np.squeeze(weights))
if weights.shape[0] == nobs and nobs == nentity:
raise AmbiguityError(
"Unable to distinguish nobs form nentity since they are "
"equal. You must use an 2-d array to avoid ambiguity."
)
if (
isinstance(weights, (Series, DataFrame))
and isinstance(weights.index, MultiIndex)
and weights.shape[0] == self.dependent.dataframe.shape[0]
):
frame = DataFrame(weights)
elif weights.shape[0] == nobs:
weights_arr = np.asarray(weights)[:, None]
weights_arr = weights_arr @ np.ones((1, nentity))
frame.iloc[:, :] = weights_arr
elif weights.shape[0] == nentity:
weights_arr = np.asarray(weights)[None, :]
weights_arr = np.ones((nobs, 1)) @ weights_arr
frame.iloc[:, :] = weights_arr
elif weights.shape[0] == nentity * nobs:
frame = self.dependent.dataframe.copy()
frame.iloc[:, :] = np.asarray(weights)[:, None]
else:
raise ValueError("Weights do not have a supported shape.")
return PanelData(frame)
|
(self, weights: Union[linearmodels.panel.data.PanelData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType]) -> linearmodels.panel.data.PanelData
|
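As a quick reference for the shapes `_adapt_weights` accepts above, the following is a hedged sketch; the dimensions and variable names are illustrative and not part of the library.
import numpy as np

nobs, nentity = 5, 10                  # hypothetical panel dimensions
w_full = np.ones((nobs, nentity))      # full time-by-entity panel of weights
w_time = np.ones(nobs)                 # one weight per period, broadcast across entities
w_entity = np.ones(nentity)            # one weight per entity, broadcast across periods
w_stacked = np.ones(nobs * nentity)    # stacked weights matching the MultiIndexed data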
42,823 |
linearmodels.panel.model
|
_check_exog_rank
| null |
def _check_exog_rank(self) -> int:
if not self._check_rank:
return self.exog.shape[1]
x = cast(Float64Array, self.exog.values2d)
_, _, rank_of_x, _ = _lstsq(x, np.ones(x.shape[0]))
if rank_of_x < x.shape[1]:
raise ValueError(
"exog does not have full column rank. If you wish to proceed with "
"model estimation irrespective of the numerical accuracy of "
"coefficient estimates, you can set check_rank=False."
)
return rank_of_x
|
(self) -> int
|
42,824 |
linearmodels.panel.model
|
_f_statistic
|
Compute model F-statistic
|
def _f_statistic(
self,
weps: Float64Array,
y: Float64Array,
x: Float64Array,
root_w: Float64Array,
df_resid: int,
) -> WaldTestStatistic | InvalidTestStatistic:
"""Compute model F-statistic"""
weps_const = y
num_df = x.shape[1]
name = "Model F-statistic (homoskedastic)"
if self.has_constant:
if num_df == 1:
return InvalidTestStatistic("Model contains only a constant", name=name)
num_df -= 1
weps_const = cast(
Float64Array,
y - float(np.squeeze((root_w.T @ y) / (root_w.T @ root_w))),
)
resid_ss = float(np.squeeze(weps.T @ weps))
num = float(np.squeeze(weps_const.T @ weps_const - resid_ss))
denom = resid_ss
denom_df = df_resid
stat = float((num / num_df) / (denom / denom_df)) if denom > 0.0 else 0.0
return WaldTestStatistic(
stat,
null="All parameters ex. constant are zero",
df=num_df,
df_denom=denom_df,
name=name,
)
|
(self, weps: numpy.ndarray, y: numpy.ndarray, x: numpy.ndarray, root_w: numpy.ndarray, df_resid: int) -> linearmodels.shared.hypotheses.WaldTestStatistic | linearmodels.shared.hypotheses.InvalidTestStatistic
|
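For reference, the quantity assembled in `_f_statistic` above is the standard homoskedastic F-test that all slope coefficients other than the constant are zero. With :math:`RSS_u` the unrestricted weighted residual sum of squares, :math:`RSS_r` the restricted sum (the dependent variable demeaned when a constant is present), :math:`q` the number of tested coefficients and :math:`n-k` the residual degrees of freedom, the statistic is
.. math::
   F = \frac{(RSS_r - RSS_u) / q}{RSS_u / (n - k)}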
42,825 |
linearmodels.panel.model
|
_f_statistic_robust
|
Compute Wald test that all parameters are 0, ex. constant
|
def _f_statistic_robust(
self,
params: Float64Array,
) -> FInfo:
"""Compute Wald test that all parameters are 0, ex. constant"""
sel = np.ones(params.shape[0], dtype=bool)
name = "Model F-statistic (robust)"
if self.has_constant:
if len(sel) == 1:
return FInfo(
sel,
name,
InvalidTestStatistic("Model contains only a constant", name=name),
True,
)
assert isinstance(self._constant_index, int)
sel[self._constant_index] = False
return FInfo(sel, name, None, False)
|
(self, params: numpy.ndarray) -> linearmodels.panel.model.FInfo
|
42,826 |
linearmodels.panel.model
|
_info
|
Information about panel structure
|
def _info(self) -> tuple[Series, Series, DataFrame | None]:
"""Information about panel structure"""
entity_info = panel_structure_stats(
self.dependent.entity_ids.squeeze(), "Observations per entity"
)
time_info = panel_structure_stats(
self.dependent.time_ids.squeeze(), "Observations per time period"
)
other_info = None
return entity_info, time_info, other_info
|
(self) -> tuple[pandas.core.series.Series, pandas.core.series.Series, pandas.core.frame.DataFrame | None]
|
42,827 |
linearmodels.panel.model
|
_postestimation
|
Common post-estimation values
|
def _postestimation(
self,
params: Float64Array,
cov: CovarianceEstimator,
debiased: bool,
df_resid: int,
weps: Float64Array,
y: Float64Array,
x: Float64Array,
root_w: Float64Array,
) -> AttrDict:
"""Common post-estimation values"""
f_info = self._f_statistic_robust(params)
f_stat = self._f_statistic(weps, y, x, root_w, df_resid)
r2o, r2w, r2b = self._rsquared(params)
c2o, c2w, c2b = self._rsquared_corr(params)
f_pooled = InapplicableTestStatistic(
reason="Model has no effects", name="Pooled F-stat"
)
entity_info, time_info, other_info = self._info()
nobs = weps.shape[0]
sigma2 = float(np.squeeze(weps.T @ weps) / nobs)
if sigma2 > 0.0:
loglik = -0.5 * nobs * (np.log(2 * np.pi) + np.log(sigma2) + 1)
else:
loglik = np.nan
res = AttrDict(
params=params,
deferred_cov=cov.deferred_cov,
f_info=f_info,
f_stat=f_stat,
debiased=debiased,
name=self._name,
var_names=self.exog.vars,
r2w=r2w,
r2b=r2b,
r2=r2w,
r2o=r2o,
c2o=c2o,
c2b=c2b,
c2w=c2w,
s2=cov.s2,
model=self,
cov_type=cov.name,
index=self.dependent.index,
entity_info=entity_info,
time_info=time_info,
other_info=other_info,
f_pooled=f_pooled,
loglik=loglik,
not_null=self._not_null,
original_index=self._original_index,
)
return res
|
(self, params: numpy.ndarray, cov: Union[linearmodels.panel.covariance.ACCovariance, linearmodels.panel.covariance.ClusteredCovariance, linearmodels.panel.covariance.DriscollKraay, linearmodels.panel.covariance.HeteroskedasticCovariance, linearmodels.panel.covariance.HomoskedasticCovariance], debiased: bool, df_resid: int, weps: numpy.ndarray, y: numpy.ndarray, x: numpy.ndarray, root_w: numpy.ndarray) -> linearmodels.shared.utility.AttrDict
|
42,828 |
linearmodels.panel.model
|
_prepare_between
|
Prepare values for between estimation of R2
|
def _prepare_between(self) -> tuple[Float64Array, Float64Array, Float64Array]:
"""Prepare values for between estimation of R2"""
weights = self.weights if self._is_weighted else None
y = np.asarray(self.dependent.mean("entity", weights=weights))
x = np.asarray(self.exog.mean("entity", weights=weights))
# Weight transformation
wcount, wmean = self.weights.count("entity"), self.weights.mean("entity")
wsum = wcount * wmean
w = np.asarray(wsum)
w = w / w.mean()
return y, x, w
|
(self) -> tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]
|
42,829 |
linearmodels.panel.model
|
_rsquared
|
Compute alternative measures of R2
|
def _rsquared(
self, params: Float64Array, reweight: bool = False
) -> tuple[float, float, float]:
"""Compute alternative measures of R2"""
if self.has_constant and self.exog.nvar == 1:
# Constant only fast track
return 0.0, 0.0, 0.0
#############################################
# R2 - Between
#############################################
y, x, w = self._prepare_between()
if np.all(self.weights.values2d == 1.0) and not reweight:
w = root_w = np.ones_like(w)
else:
root_w = cast(Float64Array, np.sqrt(w))
wx = root_w * x
wy = root_w * y
weps = wy - wx @ params
residual_ss = float(np.squeeze(weps.T @ weps))
e = y
if self.has_constant:
e = y - (w * y).sum() / w.sum()
total_ss = float(np.squeeze(w.T @ (e**2)))
r2b = 1 - residual_ss / total_ss if total_ss > 0.0 else 0.0
#############################################
# R2 - Overall
#############################################
y = self.dependent.values2d
x = self.exog.values2d
w = self.weights.values2d
root_w = cast(Float64Array, np.sqrt(w))
wx = root_w * x
wy = root_w * y
weps = wy - wx @ params
residual_ss = float(np.squeeze(weps.T @ weps))
mu = (w * y).sum() / w.sum() if self.has_constant else 0
we = wy - root_w * mu
total_ss = float(np.squeeze(we.T @ we))
r2o = 1 - residual_ss / total_ss if total_ss > 0.0 else 0.0
#############################################
# R2 - Within
#############################################
weights = self.weights if self._is_weighted else None
wy = cast(
Float64Array,
self.dependent.demean("entity", weights=weights, return_panel=False),
)
wx = cast(
Float64Array,
self.exog.demean("entity", weights=weights, return_panel=False),
)
assert isinstance(wy, np.ndarray)
assert isinstance(wx, np.ndarray)
weps = wy - wx @ params
residual_ss = float(np.squeeze(weps.T @ weps))
total_ss = float(np.squeeze(wy.T @ wy))
if self.dependent.nobs == 1 or (self.exog.nvar == 1 and self.has_constant):
r2w = 0.0
else:
r2w = 1.0 - residual_ss / total_ss if total_ss > 0.0 else 0.0
return r2o, r2w, r2b
|
(self, params: numpy.ndarray, reweight: bool = False) -> tuple[float, float, float]
|
42,830 |
linearmodels.panel.model
|
_rsquared_corr
|
Correlation-based measures of R2
|
def _rsquared_corr(self, params: Float64Array) -> tuple[float, float, float]:
"""Correlation-based measures of R2"""
# Overall
y = self.dependent.values2d
x = self.exog.values2d
xb = x @ params
r2o = 0.0
if y.std() > 0 and xb.std() > 0:
r2o = np.corrcoef(y.T, (x @ params).T)[0, 1]
# Between
y = np.asarray(self.dependent.mean("entity"))
x = np.asarray(self.exog.mean("entity"))
xb = x @ params
r2b = 0.0
if y.std() > 0 and xb.std() > 0:
r2b = np.corrcoef(y.T, (x @ params).T)[0, 1]
# Within
y = self.dependent.demean("entity", return_panel=False)
x = self.exog.demean("entity", return_panel=False)
xb = x @ params
r2w = 0.0
if y.std() > 0 and xb.std() > 0:
r2w = np.corrcoef(y.T, xb.T)[0, 1]
return r2o**2, r2w**2, r2b**2
|
(self, params: numpy.ndarray) -> tuple[float, float, float]
|
42,831 |
linearmodels.panel.model
|
_setup_clusters
|
Return a copy of the covariance configuration with clusters reformatted
|
def _setup_clusters(
self,
cov_config: Mapping[str, bool | float | str | IntArray | DataFrame | PanelData],
) -> dict[str, bool | float | str | IntArray | DataFrame | PanelData]:
"""Return covariance estimator reformat clusters"""
cov_config_upd = dict(cov_config)
if "clusters" not in cov_config:
return cov_config_upd
clusters = cov_config.get("clusters", None)
if clusters is not None:
cluster_data = cast(Union[IntArray, DataFrame, PanelData], clusters)
clusters_panel = self.reformat_clusters(cluster_data)
cluster_max = np.nanmax(clusters_panel.values3d, axis=1)
delta = cluster_max - np.nanmin(clusters_panel.values3d, axis=1)
if np.any(delta != 0):
raise ValueError("clusters must not vary within an entity")
index = clusters_panel.panel.minor_axis
reindex = clusters_panel.entities
clusters_frame = DataFrame(
cluster_max.T, index=index, columns=clusters_panel.vars
)
# TODO: Bug in pandas-stubs prevents using Hashable | None
clusters_frame = clusters_frame.loc[reindex].astype(
np.int64
) # type: ignore
cov_config_upd["clusters"] = clusters_frame
return cov_config_upd
|
(self, cov_config: collections.abc.Mapping[str, bool | float | str | numpy.ndarray | pandas.core.frame.DataFrame | linearmodels.panel.data.PanelData]) -> dict[str, bool | float | str | numpy.ndarray | pandas.core.frame.DataFrame | linearmodels.panel.data.PanelData]
|
42,832 |
linearmodels.panel.model
|
_validate_data
|
Check input shape and remove missing
|
def _validate_data(self) -> None:
"""Check input shape and remove missing"""
y = self._y = cast(Float64Array, self.dependent.values2d)
x = self._x = cast(Float64Array, self.exog.values2d)
w = self._w = cast(Float64Array, self.weights.values2d)
if y.shape[0] != x.shape[0]:
raise ValueError(
"dependent and exog must have the same number of "
"observations. The number of observations in dependent "
f"is {y.shape[0]}, and the number of observations in exog "
f"is {x.shape[0]}."
)
if y.shape[0] != w.shape[0]:
raise ValueError(
"weights must have the same number of " "observations as dependent."
)
all_missing = np.any(np.isnan(y), axis=1) & np.all(np.isnan(x), axis=1)
missing = (
np.any(np.isnan(y), axis=1)
| np.any(np.isnan(x), axis=1)
| np.any(np.isnan(w), axis=1)
)
missing_warning(np.asarray(all_missing ^ missing), stacklevel=4)
if np.any(missing):
self.dependent.drop(missing)
self.exog.drop(missing)
self.weights.drop(missing)
x = cast(Float64Array, self.exog.values2d)
self._not_null = np.asarray(~missing)
w_df = self.weights.dataframe
if np.any(np.asarray(w_df) <= 0):
raise ValueError("weights must be strictly positive.")
w_df = w_df / w_df.mean()
self.weights = PanelData(w_df)
rank_of_x = self._check_exog_rank()
self._constant, self._constant_index = has_constant(x, rank_of_x)
|
(self) -> NoneType
|
42,833 |
linearmodels.panel.model
|
fit
|
Estimate model parameters
Parameters
----------
reweight : bool
Flag indicating whether to reweight observations using a WLS
estimator when the input data is unbalanced. If weights are provided,
these are accounted for when reweighting. Has no effect on balanced data.
cov_type : str
Name of covariance estimator. See Notes.
debiased : bool
Flag indicating whether to debias the covariance estimator using
a degree of freedom adjustment.
**cov_config
Additional covariance-specific options. See Notes.
Returns
-------
PanelResults
Estimation results
Examples
--------
>>> from linearmodels import BetweenOLS
>>> mod = BetweenOLS(y, x)
>>> res = mod.fit(cov_type='robust')
Notes
-----
Three covariance estimators are supported:
* "unadjusted", "homoskedastic" - Assume residual are homoskedastic
* "robust", "heteroskedastic" - Control for heteroskedasticity using
White's estimator
* "clustered` - One- or two-way clustering. Configuration options are:
* ``clusters`` - Input containing 1 or 2 variables.
Clusters should be integer values, although other types will
be coerced to integer values by treating as categorical variables
When using a clustered covariance estimator, all cluster ids must be
identical within an entity.
|
def fit(
self,
*,
reweight: bool = False,
cov_type: str = "unadjusted",
debiased: bool = True,
**cov_config: bool | float | str | IntArray | DataFrame | PanelData,
) -> PanelResults:
"""
Estimate model parameters
Parameters
----------
reweight : bool
Flag indicating whether to reweight observations using a WLS
estimator when the input data is unbalanced. If weights are provided,
these are accounted for when reweighting. Has no effect on balanced data.
cov_type : str
Name of covariance estimator. See Notes.
debiased : bool
Flag indicating whether to debias the covariance estimator using
a degree of freedom adjustment.
**cov_config
Additional covariance-specific options. See Notes.
Returns
-------
PanelResults
Estimation results
Examples
--------
>>> from linearmodels import BetweenOLS
>>> mod = BetweenOLS(y, x)
>>> res = mod.fit(cov_type='robust')
Notes
-----
Three covariance estimators are supported:
* "unadjusted", "homoskedastic" - Assume residual are homoskedastic
* "robust", "heteroskedastic" - Control for heteroskedasticity using
White's estimator
* "clustered` - One- or two-way clustering. Configuration options are:
* ``clusters`` - Input containing 1 or 2 variables.
Clusters should be integer values, although other types will
be coerced to integer values by treating as categorical variables
When using a clustered covariance estimator, all cluster ids must be
identical within an entity.
"""
y, x, w = self._prepare_between()
if np.all(self.weights.values2d == 1.0) and not reweight:
w = root_w = np.ones_like(y)
else:
root_w = cast(Float64Array, np.sqrt(w))
wx = root_w * x
wy = root_w * y
params = _lstsq(wx, wy, rcond=None)[0]
df_resid = y.shape[0] - x.shape[1]
df_model = (x.shape[1],)
nobs = y.shape[0]
cov_config = self._setup_clusters(cov_config)
extra_df = 0
if "extra_df" in cov_config:
cov_config = cov_config.copy()
_extra_df = cov_config.pop("extra_df")
assert isinstance(_extra_df, (str, int))
extra_df = int(_extra_df)
cov = setup_covariance_estimator(
self._cov_estimators,
cov_type,
wy,
wx,
params,
self.dependent.entity_ids,
self.dependent.time_ids,
debiased=debiased,
extra_df=extra_df,
**cov_config,
)
weps = wy - wx @ params
index = self.dependent.index
fitted = DataFrame(self.exog.values2d @ params, index, ["fitted_values"])
eps = y - x @ params
effects = DataFrame(eps, self.dependent.entities, ["estimated_effects"])
idx = cast(MultiIndex, fitted.index)
entities = idx.levels[0][idx.codes[0]]
effects = effects.loc[entities]
effects.index = idx
dep = self.dependent.dataframe
fitted = fitted.reindex(dep.index)
effects = effects.reindex(dep.index)
idiosyncratic = DataFrame(
np.asarray(dep) - np.asarray(fitted) - np.asarray(effects),
dep.index,
["idiosyncratic"],
)
residual_ss = float(np.squeeze(weps.T @ weps))
e = y
if self._constant:
e = y - (w * y).sum() / w.sum()
total_ss = float(np.squeeze(w.T @ (e**2)))
r2 = 1 - residual_ss / total_ss
res = self._postestimation(
params, cov, debiased, df_resid, weps, wy, wx, root_w
)
res.update(
dict(
df_resid=df_resid,
df_model=df_model,
nobs=nobs,
residual_ss=residual_ss,
total_ss=total_ss,
r2=r2,
wresids=weps,
resids=eps,
index=self.dependent.entities,
fitted=fitted,
effects=effects,
idiosyncratic=idiosyncratic,
)
)
return PanelResults(res)
|
(self, *, reweight: bool = False, cov_type: str = 'unadjusted', debiased: bool = True, **cov_config: bool | float | str | numpy.ndarray | pandas.core.frame.DataFrame | linearmodels.panel.data.PanelData) -> linearmodels.panel.results.PanelResults
|
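A hedged usage sketch for the clustered covariance described in the Notes above; `data`, `y`, `x1`, `x2` and `industry` are illustrative names, and the cluster variable must be constant within each entity.
>>> # `data` is a MultiIndexed (entity, time) DataFrame; `industry` is constant per entity
>>> mod = BetweenOLS(data["y"], data[["x1", "x2"]])
>>> res = mod.fit(cov_type="clustered", clusters=data[["industry"]])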
42,834 |
linearmodels.panel.model
|
predict
|
Predict values for additional data
Parameters
----------
params : array_like
Model parameters (nvar by 1)
exog : array_like
Exogenous regressors (nobs by nvar)
data : DataFrame
Values to use when making predictions from a model constructed
from a formula
eval_env : int
Depth to use when evaluating formulas.
context : mapping, optional
Namespace used when evaluating formulas. If None, the calling
namespace at depth ``eval_env`` is captured and used.
Returns
-------
DataFrame
Fitted values from supplied data and parameters
Notes
-----
If `data` is not None, then `exog` must be None.
Predictions from models constructed using formulas can
be computed using either `exog`, which will treat these as
arrays of values corresponding to the formula-processed data, or using
`data` which will be processed using the formula used to construct the
values corresponding to the original model specification.
|
def predict(
self,
params: ArrayLike,
*,
exog: PanelDataLike | None = None,
data: PanelDataLike | None = None,
eval_env: int = 1,
context: Mapping[str, Any] | None = None,
) -> DataFrame:
"""
Predict values for additional data
Parameters
----------
params : array_like
Model parameters (nvar by 1)
exog : array_like
Exogenous regressors (nobs by nvar)
data : DataFrame
Values to use when making predictions from a model constructed
from a formula
eval_env : int
Depth to use when evaluating formulas.
context : mapping, optional
Namespace used when evaluating formulas. If None, the calling
namespace at depth ``eval_env`` is captured and used.
Returns
-------
DataFrame
Fitted values from supplied data and parameters
Notes
-----
If `data` is not None, then `exog` must be None.
Predictions from models constructed using formulas can
be computed using either `exog`, which will treat these as
arrays of values corresponding to the formula-processed data, or using
`data` which will be processed using the formula used to construct the
values corresponding to the original model specification.
"""
if data is not None and self.formula is None:
raise ValueError(
"Unable to use data when the model was not " "created using a formula."
)
if data is not None and exog is not None:
raise ValueError(
"Predictions can only be constructed using one "
"of exog or data, but not both."
)
if exog is not None:
exog = PanelData(exog).dataframe
else:
assert self._formula is not None
assert data is not None
if context is None:
context = capture_context(eval_env)
parser = PanelFormulaParser(self._formula, data, context=context)
exog = parser.exog
x = exog.values
params = np.atleast_2d(np.asarray(params))
if params.shape[0] == 1:
params = params.T
if x.shape[1] != params.shape[0]:
raise ValueError(
EXOG_PREDICT_MSG.format(
x_shape=x.shape[1], params_shape=params.shape[0]
)
)
pred = DataFrame(x @ params, index=exog.index, columns=["predictions"])
return pred
|
(self, params: Union[numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series], *, exog: Union[linearmodels.panel.data.PanelData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType] = None, data: Union[linearmodels.panel.data.PanelData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType] = None, eval_env: int = 1, context: Optional[collections.abc.Mapping[str, Any]] = None) -> pandas.core.frame.DataFrame
|
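A short usage sketch of `predict` (hedged; `y`, `x` and `panel_df` stand for suitably MultiIndexed panel data, as elsewhere in these docstrings).
>>> mod = BetweenOLS(y, x)
>>> res = mod.fit()
>>> in_sample = mod.predict(res.params, exog=x)     # explicit regressors
>>> # For formula-built models, `data` can be passed instead of `exog`:
>>> # mod = BetweenOLS.from_formula("y ~ 1 + x1", panel_df)
>>> # preds = mod.predict(res.params, data=panel_df)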
42,835 |
linearmodels.panel.model
|
reformat_clusters
|
Reformat cluster variables
Parameters
----------
clusters : array_like
Values to use for variance clustering
Returns
-------
PanelData
Original data with matching axes and observations dropped where
missing in the model data.
Notes
-----
This is exposed for testing and is not normally needed for estimation
|
def reformat_clusters(self, clusters: IntArray | PanelDataLike) -> PanelData:
"""
Reformat cluster variables
Parameters
----------
clusters : array_like
Values to use for variance clustering
Returns
-------
PanelData
Original data with matching axes and observations dropped where
missing in the model data.
Notes
-----
This is exposed for testing and is not normally needed for estimation
"""
clusters_pd = PanelData(clusters, var_name="cov.cluster", convert_dummies=False)
if clusters_pd.shape[1:] != self._original_shape[1:]:
raise ValueError(
"clusters must have the same number of entities "
"and time periods as the model data."
)
clusters_pd.drop(~self.not_null)
return clusters_pd.copy()
|
(self, clusters: Union[numpy.ndarray, linearmodels.panel.data.PanelData, pandas.core.frame.DataFrame, pandas.core.series.Series]) -> linearmodels.panel.data.PanelData
|
42,836 |
linearmodels.panel.model
|
FamaMacBeth
|
Pooled coefficient estimator for panel data
Parameters
----------
dependent : array_like
Dependent (left-hand-side) variable (time by entity)
exog : array_like
Exogenous or right-hand-side variables (variable by time by entity).
weights : array_like
Weights to use in estimation. Assumes residual variance is
proportional to the inverse of the weight so that the residual
times the weight should be homoskedastic.
Notes
-----
The model is given by
.. math::
y_{it}=\beta^{\prime}x_{it}+\epsilon_{it}
The Fama-MacBeth estimator is computed by performing T regressions, one
for each time period using all available entity observations. Denote the
estimate of the model parameters as :math:`\hat{\beta}_t`. The reported
estimator is then
.. math::
\hat{\beta} = T^{-1}\sum_{t=1}^T \hat{\beta}_t
While the model does not explicitly include time-effects, the
implementation based on regressing all observations in a single
time period is "as-if" time effects are included.
Parameter inference is made using the set of T parameter estimates with
either the standard covariance estimator or a kernel-based covariance,
depending on ``cov_type``.
|
class FamaMacBeth(_PanelModelBase):
r"""
Pooled coefficient estimator for panel data
Parameters
----------
dependent : array_like
Dependent (left-hand-side) variable (time by entity)
exog : array_like
Exogenous or right-hand-side variables (variable by time by entity).
weights : array_like
Weights to use in estimation. Assumes residual variance is
proportional to the inverse of the weight so that the residual
times the weight should be homoskedastic.
Notes
-----
The model is given by
.. math::
y_{it}=\beta^{\prime}x_{it}+\epsilon_{it}
The Fama-MacBeth estimator is computed by performing T regressions, one
for each time period using all available entity observations. Denote the
estimate of the model parameters as :math:`\hat{\beta}_t`. The reported
estimator is then
.. math::
\hat{\beta} = T^{-1}\sum_{t=1}^T \hat{\beta}_t
While the model does not explicitly include time-effects, the
implementation based on regressing all observations in a single
time period is "as-if" time effects are included.
Parameter inference is made using the set of T parameter estimates with
either the standard covariance estimator or a kernel-based covariance,
depending on ``cov_type``.
"""
def __init__(
self,
dependent: PanelDataLike,
exog: PanelDataLike,
*,
weights: PanelDataLike | None = None,
check_rank: bool = True,
):
super().__init__(dependent, exog, weights=weights, check_rank=check_rank)
self._validate_blocks()
def _validate_blocks(self) -> None:
x = self._x
root_w = np.sqrt(self._w)
wx = root_w * x
exog = self.exog.dataframe
wx_df = DataFrame(
wx[self._not_null], index=exog.notnull().index, columns=exog.columns
)
def validate_block(ex: Float64Array | DataFrame) -> bool:
_ex = np.asarray(ex, dtype=float)
def _mr(ex: Float64Array) -> int:
"""lstsq based matrix_rank"""
return _lstsq(ex, np.ones(ex.shape[0]))[2]
return _ex.shape[0] >= _ex.shape[1] and _mr(_ex) == _ex.shape[1]
valid_blocks = wx_df.groupby(level=1).apply(validate_block)
if not valid_blocks.any():
err = (
"Model cannot be estimated. All blocks of time-series observations "
"are rank deficient, and so it is not possible to estimate any"
"cross-sectional regressions."
)
raise ValueError(err)
if valid_blocks.sum() < exog.shape[1]:
import warnings
warnings.warn(
"The number of time-series observation available to estimate "
"cross-sectional\nregressions, {}, is less than the number of "
"parameters in the model. Parameter\ninference is not "
"available.".format(valid_blocks.sum()),
InferenceUnavailableWarning,
stacklevel=3,
)
elif valid_blocks.sum() < valid_blocks.shape[0]:
import warnings
warnings.warn(
"{} of the time-series regressions cannot be estimated due to "
"deficient rank.".format(valid_blocks.shape[0] - valid_blocks.sum()),
MissingValueWarning,
stacklevel=3,
)
def fit(
self,
cov_type: str = "unadjusted",
debiased: bool = True,
bandwidth: float | None = None,
kernel: str | None = None,
) -> FamaMacBethResults:
"""
Estimate model parameters
Parameters
----------
cov_type : str
Name of covariance estimator (see notes). Default is "unadjusted".
debiased : bool
Flag indicating whether to debias the covariance estimator using
a degree of freedom adjustment.
bandwidth : float
The bandwidth to use when cov_type is "kernel". If None, it is
automatically computed.
kernel : str
The kernel to use. None chooses the default kernel.
Returns
-------
PanelResults
Estimation results
Examples
--------
>>> from linearmodels import FamaMacBeth
>>> mod = FamaMacBeth(y, x)
>>> res = mod.fit(cov_type="kernel", kernel="Parzen")
Notes
-----
Two covariance estimators are supported:
* "unadjusted", "homoskedastic", "robust", "heteroskedastic" use the
standard covariance estimator of the T parameter estimates.
* "kernel" is a HAC estimator. Configurations options are:
"""
y = cast(Float64Array, self._y)
x = cast(Float64Array, self._x)
root_w = cast(Float64Array, np.sqrt(self._w))
wy = cast(Float64Array, root_w * y)
wx = cast(Float64Array, root_w * x)
dep = self.dependent.dataframe
exog = self.exog.dataframe
index = self.dependent.index
wy_df = DataFrame(wy[self._not_null], index=index, columns=dep.columns)
wx_df = DataFrame(
wx[self._not_null], index=exog.notnull().index, columns=exog.columns
)
yx = DataFrame(
np.c_[wy_df.values, wx_df.values],
columns=list(wy_df.columns) + list(wx_df.columns),
index=wy_df.index,
)
has_constant = self.has_constant
def single(z: DataFrame) -> Series:
exog = z.iloc[:, 1:].values
cols = list(z.columns) + ["r2", "adv_r2"]
if exog.shape[0] < exog.shape[1]:
return Series([np.nan] * (len(z.columns) + 2), index=cols)
dep = z.iloc[:, :1].values
params, _, rank, _ = _lstsq(exog, dep)
nexog = exog.shape[1]
if rank != nexog:
return Series([np.nan] * (len(z.columns) + 2), index=cols)
err = dep - exog @ params
sse = float(np.squeeze(err.T @ err))
if has_constant:
dep_demean = dep - dep.mean()
tss = float(np.squeeze(dep_demean.T @ dep_demean))
else:
tss = float(np.squeeze(dep.T @ dep))
r2 = 1 - sse / tss
nobs = exog.shape[0]
if nobs - nexog > 0:
adj_r2 = 1 - (sse / (nobs - nexog)) / (tss / (nobs - int(has_constant)))
else:
adj_r2 = np.nan
return Series(np.r_[np.nan, params.ravel(), r2, adj_r2], index=cols)
all_params = yx.groupby(level=1).apply(single)
avg_r2 = np.nanmean(all_params.iloc[:, -2])
avg_adj_r2_values = all_params.iloc[:, -1]
if np.any(np.isfinite(avg_adj_r2_values)):
avg_adj_r2 = np.nanmean(avg_adj_r2_values)
else:
avg_adj_r2 = np.nan
all_params = all_params.iloc[:, 1:-2]
params = np.asarray(all_params.mean(axis=0).values[:, None], dtype=float)
wy = np.asarray(wy_df)
wx = np.asarray(wx_df)
index = self.dependent.index
fitted = DataFrame(self.exog.values2d @ params, index, ["fitted_values"])
effects = DataFrame(np.full(fitted.shape, np.nan), index, ["estimated_effects"])
idiosyncratic = DataFrame(
self.dependent.values2d - fitted.values, index, ["idiosyncratic"]
)
eps = self.dependent.values2d - fitted.values
weps = wy - wx @ params
w = self.weights.values2d
root_w = cast(Float64Array, np.sqrt(w))
#
residual_ss = float(np.squeeze(weps.T @ weps))
y = e = self.dependent.values2d
if self.has_constant:
e = y - (w * y).sum() / w.sum()
total_ss = float(np.squeeze(w.T @ (e**2)))
r2 = 1 - residual_ss / total_ss
if cov_type not in (
"robust",
"unadjusted",
"homoskedastic",
"heteroskedastic",
"kernel",
):
raise ValueError("Unknown cov_type")
bandwidth = 0.0 if cov_type != "kernel" else bandwidth
cov = FamaMacBethCovariance(
wy,
wx,
params,
all_params,
debiased=debiased,
kernel=kernel,
bandwidth=bandwidth,
)
df_resid = wy.shape[0] - params.shape[0]
res = self._postestimation(
params, cov, debiased, df_resid, weps, wy, wx, root_w
)
index = self.dependent.index
res.update(
dict(
df_resid=df_resid,
df_model=x.shape[1],
nobs=y.shape[0],
residual_ss=residual_ss,
total_ss=total_ss,
r2=r2,
resids=eps,
wresids=weps,
index=index,
fitted=fitted,
effects=effects,
idiosyncratic=idiosyncratic,
all_params=all_params,
avg_r2=avg_r2,
avg_adj_r2=avg_adj_r2,
)
)
return FamaMacBethResults(res)
@classmethod
def from_formula(
cls,
formula: str,
data: PanelDataLike,
*,
weights: PanelDataLike | None = None,
check_rank: bool = True,
) -> FamaMacBeth:
"""
Create a model from a formula
Parameters
----------
formula : str
Formula to transform into model. Conforms to formulaic formula
rules.
data : array_like
Data structure that can be coerced into a PanelData. In most
cases, this should be a multi-index DataFrame where the level 0
index contains the entities and the level 1 contains the time.
weights: array_like
Weights to use in estimation. Assumes residual variance is
proportional to the inverse of the weight so that the residual
times the weight should be homoskedastic.
check_rank : bool
Flag indicating whether to perform a rank check on the exogenous
variables to ensure that the model is identified. Skipping this
check can reduce the time required to validate a model
specification. Results may be numerically unstable if this check
is skipped and the matrix is not full rank.
Returns
-------
FamaMacBeth
Model specified using the formula
Notes
-----
Unlike standard formula syntax, it is necessary to explicitly include
a constant using the constant indicator (1)
Examples
--------
>>> from linearmodels import FamaMacBeth
>>> from linearmodels.panel import generate_panel_data
>>> panel_data = generate_panel_data()
>>> mod = FamaMacBeth.from_formula("y ~ 1 + x1", panel_data.data)
>>> res = mod.fit()
"""
parser = PanelFormulaParser(formula, data, context=capture_context(1))
dependent, exog = parser.data
mod = cls(dependent, exog, weights=weights, check_rank=check_rank)
mod.formula = formula
return mod
|
(dependent: 'PanelDataLike', exog: 'PanelDataLike', *, weights: 'PanelDataLike | None' = None, check_rank: 'bool' = True)
|
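The two-pass logic described in the FamaMacBeth notes can be sketched by hand as below. This is an illustration of the procedure under an assumed data layout (MultiIndex with time as level 1), not the library implementation; the function name and arguments are hypothetical.
import numpy as np
import pandas as pd

def fama_macbeth_sketch(df: pd.DataFrame, dep: str, regs: list[str]) -> pd.Series:
    """Average per-period cross-sectional OLS estimates (illustrative only)."""
    betas = []
    for _, period in df.groupby(level=1):      # one cross-section per time period
        X = np.column_stack(
            [np.ones(len(period))] + [period[c].to_numpy() for c in regs]
        )
        b, *_ = np.linalg.lstsq(X, period[dep].to_numpy(), rcond=None)
        betas.append(b)
    return pd.Series(np.mean(betas, axis=0), index=["const"] + regs)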
42,837 |
linearmodels.panel.model
|
__init__
| null |
def __init__(
self,
dependent: PanelDataLike,
exog: PanelDataLike,
*,
weights: PanelDataLike | None = None,
check_rank: bool = True,
):
super().__init__(dependent, exog, weights=weights, check_rank=check_rank)
self._validate_blocks()
|
(self, dependent: Union[linearmodels.panel.data.PanelData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series], exog: Union[linearmodels.panel.data.PanelData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series], *, weights: Union[linearmodels.panel.data.PanelData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType] = None, check_rank: bool = True)
|
42,849 |
linearmodels.panel.model
|
_setup_clusters
| null |
def _setup_clusters(
self,
cov_config: Mapping[str, bool | float | str | IntArray | DataFrame | PanelData],
) -> dict[str, bool | float | str | IntArray | DataFrame | PanelData]:
cov_config_upd = dict(cov_config)
cluster_types = ("clusters", "cluster_entity", "cluster_time")
common = set(cov_config.keys()).intersection(cluster_types)
if not common:
return cov_config_upd
cov_config_upd = {k: v for k, v in cov_config.items()}
clusters = get_panel_data_like(cov_config, "clusters")
clusters_frame: DataFrame | None = None
if clusters is not None:
formatted_clusters = self.reformat_clusters(clusters)
for col in formatted_clusters.dataframe:
cat = Categorical(formatted_clusters.dataframe[col])
# TODO: Bug in pandas-stubs
# https://github.com/pandas-dev/pandas-stubs/issues/111
formatted_clusters.dataframe[col] = cat.codes.astype(
np.int64
) # type: ignore
clusters_frame = formatted_clusters.dataframe
cluster_entity = bool(cov_config_upd.pop("cluster_entity", False))
if cluster_entity:
group_ids_arr = self.dependent.entity_ids.squeeze()
name = "cov.cluster.entity"
group_ids = Series(group_ids_arr, index=self.dependent.index, name=name)
if clusters_frame is not None:
clusters_frame[name] = group_ids
else:
clusters_frame = DataFrame(group_ids)
cluster_time = bool(cov_config_upd.pop("cluster_time", False))
if cluster_time:
group_ids_arr = self.dependent.time_ids.squeeze()
name = "cov.cluster.time"
group_ids = Series(group_ids_arr, index=self.dependent.index, name=name)
if clusters_frame is not None:
clusters_frame[name] = group_ids
else:
clusters_frame = DataFrame(group_ids)
if self._singleton_index is not None and clusters_frame is not None:
clusters_frame = clusters_frame.loc[~self._singleton_index]
if clusters_frame is not None:
cov_config_upd["clusters"] = np.asarray(clusters_frame)
return cov_config_upd
|
(self, cov_config: collections.abc.Mapping[str, bool | float | str | numpy.ndarray | pandas.core.frame.DataFrame | linearmodels.panel.data.PanelData]) -> dict[str, bool | float | str | numpy.ndarray | pandas.core.frame.DataFrame | linearmodels.panel.data.PanelData]
|
42,850 |
linearmodels.panel.model
|
_validate_blocks
| null |
def _validate_blocks(self) -> None:
x = self._x
root_w = np.sqrt(self._w)
wx = root_w * x
exog = self.exog.dataframe
wx_df = DataFrame(
wx[self._not_null], index=exog.notnull().index, columns=exog.columns
)
def validate_block(ex: Float64Array | DataFrame) -> bool:
_ex = np.asarray(ex, dtype=float)
def _mr(ex: Float64Array) -> int:
"""lstsq based matrix_rank"""
return _lstsq(ex, np.ones(ex.shape[0]))[2]
return _ex.shape[0] >= _ex.shape[1] and _mr(_ex) == _ex.shape[1]
valid_blocks = wx_df.groupby(level=1).apply(validate_block)
if not valid_blocks.any():
err = (
"Model cannot be estimated. All blocks of time-series observations "
"are rank deficient, and so it is not possible to estimate any"
"cross-sectional regressions."
)
raise ValueError(err)
if valid_blocks.sum() < exog.shape[1]:
import warnings
warnings.warn(
"The number of time-series observation available to estimate "
"cross-sectional\nregressions, {}, is less than the number of "
"parameters in the model. Parameter\ninference is not "
"available.".format(valid_blocks.sum()),
InferenceUnavailableWarning,
stacklevel=3,
)
elif valid_blocks.sum() < valid_blocks.shape[0]:
import warnings
warnings.warn(
"{} of the time-series regressions cannot be estimated due to "
"deficient rank.".format(valid_blocks.shape[0] - valid_blocks.sum()),
MissingValueWarning,
stacklevel=3,
)
|
(self) -> NoneType
|
42,852 |
linearmodels.panel.model
|
fit
|
Estimate model parameters
Parameters
----------
cov_type : str
Name of covariance estimator (see notes). Default is "unadjusted".
debiased : bool
Flag indicating whether to debias the covariance estimator using
a degree of freedom adjustment.
bandwidth : float
The bandwidth to use when cov_type is "kernel". If None, it is
automatically computed.
kernel : str
The kernel to use. None chooses the default kernel.
Returns
-------
PanelResults
Estimation results
Examples
--------
>>> from linearmodels import FamaMacBeth
>>> mod = FamaMacBeth(y, x)
>>> res = mod.fit(cov_type="kernel", kernel="Parzen")
Notes
-----
Two covariance estimators are supported:
* "unadjusted", "homoskedastic", "robust", "heteroskedastic" use the
standard covariance estimator of the T parameter estimates.
* "kernel" is a HAC estimator. Configurations options are:
|
def fit(
self,
cov_type: str = "unadjusted",
debiased: bool = True,
bandwidth: float | None = None,
kernel: str | None = None,
) -> FamaMacBethResults:
"""
Estimate model parameters
Parameters
----------
cov_type : str
Name of covariance estimator (see notes). Default is "unadjusted".
debiased : bool
Flag indicating whether to debias the covariance estimator using
a degree of freedom adjustment.
bandwidth : float
The bandwidth to use when cov_type is "kernel". If None, it is
automatically computed.
kernel : str
The kernel to use. None chooses the default kernel.
Returns
-------
PanelResults
Estimation results
Examples
--------
>>> from linearmodels import FamaMacBeth
>>> mod = FamaMacBeth(y, x)
>>> res = mod.fit(cov_type="kernel", kernel="Parzen")
Notes
-----
Two covariance estimators are supported:
* "unadjusted", "homoskedastic", "robust", "heteroskedastic" use the
standard covariance estimator of the T parameter estimates.
* "kernel" is a HAC estimator. Configurations options are:
"""
y = cast(Float64Array, self._y)
x = cast(Float64Array, self._x)
root_w = cast(Float64Array, np.sqrt(self._w))
wy = cast(Float64Array, root_w * y)
wx = cast(Float64Array, root_w * x)
dep = self.dependent.dataframe
exog = self.exog.dataframe
index = self.dependent.index
wy_df = DataFrame(wy[self._not_null], index=index, columns=dep.columns)
wx_df = DataFrame(
wx[self._not_null], index=exog.notnull().index, columns=exog.columns
)
yx = DataFrame(
np.c_[wy_df.values, wx_df.values],
columns=list(wy_df.columns) + list(wx_df.columns),
index=wy_df.index,
)
has_constant = self.has_constant
def single(z: DataFrame) -> Series:
exog = z.iloc[:, 1:].values
cols = list(z.columns) + ["r2", "adv_r2"]
if exog.shape[0] < exog.shape[1]:
return Series([np.nan] * (len(z.columns) + 2), index=cols)
dep = z.iloc[:, :1].values
params, _, rank, _ = _lstsq(exog, dep)
nexog = exog.shape[1]
if rank != nexog:
return Series([np.nan] * (len(z.columns) + 2), index=cols)
err = dep - exog @ params
sse = float(np.squeeze(err.T @ err))
if has_constant:
dep_demean = dep - dep.mean()
tss = float(np.squeeze(dep_demean.T @ dep_demean))
else:
tss = float(np.squeeze(dep.T @ dep))
r2 = 1 - sse / tss
nobs = exog.shape[0]
if nobs - nexog > 0:
adj_r2 = 1 - (sse / (nobs - nexog)) / (tss / (nobs - int(has_constant)))
else:
adj_r2 = np.nan
return Series(np.r_[np.nan, params.ravel(), r2, adj_r2], index=cols)
all_params = yx.groupby(level=1).apply(single)
avg_r2 = np.nanmean(all_params.iloc[:, -2])
avg_adj_r2_values = all_params.iloc[:, -1]
if np.any(np.isfinite(avg_adj_r2_values)):
avg_adj_r2 = np.nanmean(avg_adj_r2_values)
else:
avg_adj_r2 = np.nan
all_params = all_params.iloc[:, 1:-2]
params = np.asarray(all_params.mean(axis=0).values[:, None], dtype=float)
wy = np.asarray(wy_df)
wx = np.asarray(wx_df)
index = self.dependent.index
fitted = DataFrame(self.exog.values2d @ params, index, ["fitted_values"])
effects = DataFrame(np.full(fitted.shape, np.nan), index, ["estimated_effects"])
idiosyncratic = DataFrame(
self.dependent.values2d - fitted.values, index, ["idiosyncratic"]
)
eps = self.dependent.values2d - fitted.values
weps = wy - wx @ params
w = self.weights.values2d
root_w = cast(Float64Array, np.sqrt(w))
#
residual_ss = float(np.squeeze(weps.T @ weps))
y = e = self.dependent.values2d
if self.has_constant:
e = y - (w * y).sum() / w.sum()
total_ss = float(np.squeeze(w.T @ (e**2)))
r2 = 1 - residual_ss / total_ss
if cov_type not in (
"robust",
"unadjusted",
"homoskedastic",
"heteroskedastic",
"kernel",
):
raise ValueError("Unknown cov_type")
bandwidth = 0.0 if cov_type != "kernel" else bandwidth
cov = FamaMacBethCovariance(
wy,
wx,
params,
all_params,
debiased=debiased,
kernel=kernel,
bandwidth=bandwidth,
)
df_resid = wy.shape[0] - params.shape[0]
res = self._postestimation(
params, cov, debiased, df_resid, weps, wy, wx, root_w
)
index = self.dependent.index
res.update(
dict(
df_resid=df_resid,
df_model=x.shape[1],
nobs=y.shape[0],
residual_ss=residual_ss,
total_ss=total_ss,
r2=r2,
resids=eps,
wresids=weps,
index=index,
fitted=fitted,
effects=effects,
idiosyncratic=idiosyncratic,
all_params=all_params,
avg_r2=avg_r2,
avg_adj_r2=avg_adj_r2,
)
)
return FamaMacBethResults(res)
|
(self, cov_type: str = 'unadjusted', debiased: bool = True, bandwidth: Optional[float] = None, kernel: Optional[str] = None) -> linearmodels.panel.results.FamaMacBethResults
|
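A hedged usage note for the kernel covariance option above: the bandwidth may be set explicitly, otherwise it is computed automatically. The bandwidth value below is purely illustrative.
>>> mod = FamaMacBeth(y, x)
>>> res_default = mod.fit()                          # plain covariance of the T estimates
>>> res_hac = mod.fit(cov_type="kernel", kernel="Parzen", bandwidth=12.0)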
42,855 |
linearmodels.panel.model
|
FirstDifferenceOLS
|
First difference model for panel data
Parameters
----------
dependent : array_like
Dependent (left-hand-side) variable (time by entity)
exog : array_like
Exogenous or right-hand-side variables (variable by time by entity).
weights : array_like
Weights to use in estimation. Assumes residual variance is
proportional to the inverse of the weight so that the residual
times the weight should be homoskedastic.
Notes
-----
The model is given by
.. math::
\Delta y_{it}=\beta^{\prime}\Delta x_{it}+\Delta\epsilon_{it}
|
class FirstDifferenceOLS(_PanelModelBase):
r"""
First difference model for panel data
Parameters
----------
dependent : array_like
Dependent (left-hand-side) variable (time by entity)
exog : array_like
Exogenous or right-hand-side variables (variable by time by entity).
weights : array_like
Weights to use in estimation. Assumes residual variance is
proportional to the inverse of the weight so that the residual
times the weight should be homoskedastic.
Notes
-----
The model is given by
.. math::
\Delta y_{it}=\beta^{\prime}\Delta x_{it}+\Delta\epsilon_{it}
"""
def __init__(
self,
dependent: PanelDataLike,
exog: PanelDataLike,
*,
weights: PanelDataLike | None = None,
check_rank: bool = True,
):
super().__init__(dependent, exog, weights=weights, check_rank=check_rank)
if self._constant:
raise ValueError(
"Constants are not allowed in first difference regressions."
)
if self.dependent.nobs < 2:
raise ValueError("Panel must have at least 2 time periods")
def _setup_clusters(
self,
cov_config: Mapping[str, bool | float | str | IntArray | DataFrame | PanelData],
) -> dict[str, bool | float | str | IntArray | DataFrame | PanelData]:
cov_config_upd = dict(cov_config).copy()
cluster_types = ("clusters", "cluster_entity")
common = set(cov_config.keys()).intersection(cluster_types)
if not common:
return cov_config_upd
clusters = cov_config.get("clusters", None)
clusters_frame: DataFrame | None = None
if clusters is not None:
cluster_data = cast(Union[IntArray, DataFrame, PanelData], clusters)
clusters_panel = self.reformat_clusters(cluster_data)
fd = clusters_panel.first_difference()
fd_array = fd.values2d
if np.any(fd_array.flat[np.isfinite(fd_array.flat)] != 0):
raise ValueError(
"clusters must be identical for values used "
"to compute the first difference"
)
clusters_frame = clusters_panel.dataframe.copy()
cluster_entity = cov_config_upd.pop("cluster_entity", False)
if cluster_entity:
group_ids = self.dependent.entity_ids.squeeze()
name = "cov.cluster.entity"
group_ids_s = Series(group_ids, index=self.dependent.index, name=name)
if clusters_frame is not None:
clusters_frame[name] = group_ids_s
else:
clusters_frame = DataFrame(group_ids_s)
assert clusters_frame is not None
cluster_data = PanelData(clusters_frame)
values = cluster_data.values3d[:, 1:]
cluster_frame = panel_to_frame(
values,
cluster_data.panel.items,
cluster_data.panel.major_axis[1:],
cluster_data.panel.minor_axis,
True,
)
cluster_frame = PanelData(cluster_frame).dataframe
cluster_frame = cluster_frame.loc[self.dependent.first_difference().index]
cluster_frame = cluster_frame.astype(np.int64)
cov_config_upd["clusters"] = (
cluster_frame.values if cluster_frame is not None else None
)
return cov_config_upd
def fit(
self,
*,
cov_type: str = "unadjusted",
debiased: bool = True,
**cov_config: bool | float | str | IntArray | DataFrame | PanelData,
) -> PanelResults:
"""
Estimate model parameters
Parameters
----------
cov_type : str
Name of covariance estimator. See Notes.
debiased : bool
Flag indicating whether to debias the covariance estimator using
a degree of freedom adjustment.
**cov_config
Additional covariance-specific options. See Notes.
Returns
-------
PanelResults
Estimation results
Examples
--------
>>> from linearmodels import FirstDifferenceOLS
>>> mod = FirstDifferenceOLS(y, x)
>>> robust = mod.fit(cov_type="robust")
>>> clustered = mod.fit(cov_type="clustered", cluster_entity=True)
Notes
-----
Four covariance estimators are supported:
* "unadjusted", "homoskedastic" - Assume residuals are homoskedastic
* "robust", "heteroskedastic" - Control for heteroskedasticity using
White's estimator
* "clustered" - One- or two-way clustering. Configuration options are:
* ``clusters`` - Input containing 1 or 2 variables.
Clusters should be integer values, although other types will
be coerced to integer values by treating as categorical variables
* ``cluster_entity`` - Boolean flag indicating whether to use entity
clusters
* "kernel" - Driscoll-Kraay HAC estimator. Configuration options are:
* ``kernel`` - One of the supported kernels (bartlett, parzen, qs).
Default is Bartlett's kernel, which produces a covariance
estimator similar to the Newey-West covariance estimator.
* ``bandwidth`` - Bandwidth to use when computing the kernel. If
not provided, a naive default is used.
When using a clustered covariance estimator, all cluster ids must be
identical within a first difference. In most scenarios, this requires
ids to be identical within an entity.
"""
y_fd = self.dependent.first_difference()
time_ids = y_fd.time_ids
entity_ids = y_fd.entity_ids
index = y_fd.index
y = cast(Float64Array, y_fd.values2d)
x = cast(Float64Array, self.exog.first_difference().values2d)
if np.all(self.weights.values2d == 1.0):
w = root_w = np.ones_like(y)
else:
w = cast(Float64Array, 1.0 / self.weights.values3d)
w = w[:, :-1] + w[:, 1:]
w = cast(Float64Array, 1.0 / w)
w_frame = panel_to_frame(
w,
self.weights.panel.items,
self.weights.panel.major_axis[1:],
self.weights.panel.minor_axis,
True,
)
w_frame = w_frame.reindex(self.weights.index).dropna(how="any")
index = cast(MultiIndex, w_frame.index)
w = np.require(w_frame, requirements="W")
w /= w.mean()
root_w = cast(Float64Array, np.sqrt(w))
wx = root_w * x
wy = root_w * y
params = _lstsq(wx, wy, rcond=None)[0]
df_resid = y.shape[0] - x.shape[1]
cov_config = self._setup_clusters(cov_config)
extra_df = 0
if "extra_df" in cov_config:
cov_config = cov_config.copy()
_extra_df = cov_config.pop("extra_df")
assert isinstance(_extra_df, (str, int))
extra_df = int(_extra_df)
cov = setup_covariance_estimator(
self._cov_estimators,
cov_type,
wy,
wx,
params,
entity_ids,
time_ids,
debiased=debiased,
extra_df=extra_df,
**cov_config,
)
weps = wy - wx @ params
fitted = DataFrame(
self.exog.values2d @ params, self.dependent.index, ["fitted_values"]
)
idiosyncratic = DataFrame(
self.dependent.values2d - fitted.values,
self.dependent.index,
["idiosyncratic"],
)
effects = DataFrame(
np.full_like(np.asarray(fitted), np.nan),
self.dependent.index,
["estimated_effects"],
)
eps = y - x @ params
residual_ss = float(np.squeeze(weps.T @ weps))
total_ss = float(np.squeeze(w.T @ (y**2)))
r2 = 1 - residual_ss / total_ss
res = self._postestimation(
params, cov, debiased, df_resid, weps, wy, wx, root_w
)
res.update(
dict(
df_resid=df_resid,
df_model=x.shape[1],
nobs=y.shape[0],
residual_ss=residual_ss,
total_ss=total_ss,
r2=r2,
resids=eps,
wresids=weps,
index=index,
fitted=fitted,
effects=effects,
idiosyncratic=idiosyncratic,
)
)
return PanelResults(res)
@classmethod
def from_formula(
cls,
formula: str,
data: PanelDataLike,
*,
weights: PanelDataLike | None = None,
check_rank: bool = True,
) -> FirstDifferenceOLS:
"""
Create a model from a formula
Parameters
----------
formula : str
Formula to transform into model. Conforms to formulaic formula
rules.
data : array_like
Data structure that can be coerced into a PanelData. In most
cases, this should be a multi-index DataFrame where the level 0
index contains the entities and the level 1 contains the time.
weights: array_like
Weights to use in estimation. Assumes residual variance is
proportional to the inverse of the weight so that the residual
times the weight should be homoskedastic.
check_rank : bool
Flag indicating whether to perform a rank check on the exogenous
variables to ensure that the model is identified. Skipping this
check can reduce the time required to validate a model
specification. Results may be numerically unstable if this check
is skipped and the matrix is not full rank.
Returns
-------
FirstDifferenceOLS
Model specified using the formula
Notes
-----
Unlike standard formula syntax, it is necessary to explicitly include
a constant using the constant indicator (1)
Examples
--------
>>> from linearmodels import FirstDifferenceOLS
>>> from linearmodels.panel import generate_panel_data
>>> panel_data = generate_panel_data()
>>> mod = FirstDifferenceOLS.from_formula("y ~ x1", panel_data.data)
>>> res = mod.fit()
"""
parser = PanelFormulaParser(formula, data, context=capture_context(1))
dependent, exog = parser.data
mod = cls(dependent, exog, weights=weights, check_rank=check_rank)
mod.formula = formula
return mod
|
(dependent: 'PanelDataLike', exog: 'PanelDataLike', *, weights: 'PanelDataLike | None' = None, check_rank: 'bool' = True)
|
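The first-difference transformation in the model equation above can be sketched by hand as follows. This is an illustration only (not the library implementation); the synthetic data and names are assumptions.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
entities = np.repeat(np.arange(8), 4)
time = np.tile(np.arange(4), 8)
x = rng.standard_normal(32)
y = 0.5 * x + rng.standard_normal(32)
df = pd.DataFrame({"y": y, "x": x},
                  index=pd.MultiIndex.from_arrays([entities, time]))

# Difference within each entity; the first observation of each entity drops out
dy = df.groupby(level=0)["y"].diff().dropna()
dx = df.groupby(level=0)["x"].diff().dropna()
beta_fd, *_ = np.linalg.lstsq(dx.to_numpy()[:, None], dy.to_numpy(), rcond=None)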
42,856 |
linearmodels.panel.model
|
__init__
| null |
def __init__(
self,
dependent: PanelDataLike,
exog: PanelDataLike,
*,
weights: PanelDataLike | None = None,
check_rank: bool = True,
):
super().__init__(dependent, exog, weights=weights, check_rank=check_rank)
if self._constant:
raise ValueError(
"Constants are not allowed in first difference regressions."
)
if self.dependent.nobs < 2:
raise ValueError("Panel must have at least 2 time periods")
|
(self, dependent: Union[linearmodels.panel.data.PanelData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series], exog: Union[linearmodels.panel.data.PanelData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series], *, weights: Union[linearmodels.panel.data.PanelData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType] = None, check_rank: bool = True)
|
42,868 |
linearmodels.panel.model
|
_setup_clusters
| null |
def _setup_clusters(
self,
cov_config: Mapping[str, bool | float | str | IntArray | DataFrame | PanelData],
) -> dict[str, bool | float | str | IntArray | DataFrame | PanelData]:
cov_config_upd = dict(cov_config).copy()
cluster_types = ("clusters", "cluster_entity")
common = set(cov_config.keys()).intersection(cluster_types)
if not common:
return cov_config_upd
clusters = cov_config.get("clusters", None)
clusters_frame: DataFrame | None = None
if clusters is not None:
cluster_data = cast(Union[IntArray, DataFrame, PanelData], clusters)
clusters_panel = self.reformat_clusters(cluster_data)
fd = clusters_panel.first_difference()
fd_array = fd.values2d
if np.any(fd_array.flat[np.isfinite(fd_array.flat)] != 0):
raise ValueError(
"clusters must be identical for values used "
"to compute the first difference"
)
clusters_frame = clusters_panel.dataframe.copy()
cluster_entity = cov_config_upd.pop("cluster_entity", False)
if cluster_entity:
group_ids = self.dependent.entity_ids.squeeze()
name = "cov.cluster.entity"
group_ids_s = Series(group_ids, index=self.dependent.index, name=name)
if clusters_frame is not None:
clusters_frame[name] = group_ids_s
else:
clusters_frame = DataFrame(group_ids_s)
assert clusters_frame is not None
cluster_data = PanelData(clusters_frame)
values = cluster_data.values3d[:, 1:]
cluster_frame = panel_to_frame(
values,
cluster_data.panel.items,
cluster_data.panel.major_axis[1:],
cluster_data.panel.minor_axis,
True,
)
cluster_frame = PanelData(cluster_frame).dataframe
cluster_frame = cluster_frame.loc[self.dependent.first_difference().index]
cluster_frame = cluster_frame.astype(np.int64)
cov_config_upd["clusters"] = (
cluster_frame.values if cluster_frame is not None else None
)
return cov_config_upd
|
(self, cov_config: collections.abc.Mapping[str, bool | float | str | numpy.ndarray | pandas.core.frame.DataFrame | linearmodels.panel.data.PanelData]) -> dict[str, bool | float | str | numpy.ndarray | pandas.core.frame.DataFrame | linearmodels.panel.data.PanelData]
|
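The validation above can be read as: cluster codes supplied via ``clusters`` must be constant across adjacent periods within an entity, otherwise first-differencing would combine observations from different clusters. A small pandas sketch, with hypothetical entity and cluster labels, of an assignment that passes the check:

import numpy as np
import pandas as pd

index = pd.MultiIndex.from_product(
    [["firm_a", "firm_b"], pd.date_range("2020-01-01", periods=3, freq="MS")],
    names=["entity", "time"],
)
# Each entity keeps a single cluster id for all periods, so the within-entity
# first difference of the codes is zero (NaN in each entity's first period).
clusters = pd.Series([0, 0, 0, 1, 1, 1], index=index, name="industry")

fd = clusters.groupby(level="entity").diff()
assert np.nansum(np.abs(fd.to_numpy(dtype=float))) == 0.0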
42,870 |
linearmodels.panel.model
|
fit
|
Estimate model parameters
Parameters
----------
cov_type : str
Name of covariance estimator. See Notes.
debiased : bool
    Flag indicating whether to debias the covariance estimator using
a degree of freedom adjustment.
**cov_config
Additional covariance-specific options. See Notes.
Returns
-------
PanelResults
Estimation results
Examples
--------
>>> from linearmodels import FirstDifferenceOLS
>>> mod = FirstDifferenceOLS(y, x)
>>> robust = mod.fit(cov_type="robust")
>>> clustered = mod.fit(cov_type="clustered", cluster_entity=True)
Notes
-----
Four covariance estimators are supported:
* "unadjusted", "homoskedastic" - Assume residuals are homoskedastic
* "robust", "heteroskedastic" - Control for heteroskedasticity using
White's estimator
* "clustered` - White's. Configuration options are:
* ``clusters`` - Input containing 1 or 2 variables.
Clusters should be integer values, although other types will
be coerced to integer values by treating as categorical variables
* ``cluster_entity`` - Boolean flag indicating to use entity
clusters
* "kernel" - Driscoll-Kraay HAC estimator. Configurations options are:
* ``kernel`` - One of the supported kernels (bartlett, parzen, qs).
Default is Bartlett's kernel, which is produces a covariance
estimator similar to the Newey-West covariance estimator.
* ``bandwidth`` - Bandwidth to use when computing the kernel. If
not provided, a naive default is used.
When using a clustered covariance estimator, all cluster ids must be
identical within a first difference. In most scenarios, this requires
ids to be identical within an entity.
|
def fit(
self,
*,
cov_type: str = "unadjusted",
debiased: bool = True,
**cov_config: bool | float | str | IntArray | DataFrame | PanelData,
) -> PanelResults:
"""
Estimate model parameters
Parameters
----------
cov_type : str
Name of covariance estimator. See Notes.
debiased : bool
        Flag indicating whether to debias the covariance estimator using
a degree of freedom adjustment.
**cov_config
Additional covariance-specific options. See Notes.
Returns
-------
PanelResults
Estimation results
Examples
--------
>>> from linearmodels import FirstDifferenceOLS
>>> mod = FirstDifferenceOLS(y, x)
>>> robust = mod.fit(cov_type="robust")
>>> clustered = mod.fit(cov_type="clustered", cluster_entity=True)
Notes
-----
    Four covariance estimators are supported:
    * "unadjusted", "homoskedastic" - Assume residuals are homoskedastic
* "robust", "heteroskedastic" - Control for heteroskedasticity using
White's estimator
* "clustered` - White's. Configuration options are:
* ``clusters`` - Input containing 1 or 2 variables.
Clusters should be integer values, although other types will
be coerced to integer values by treating as categorical variables
* ``cluster_entity`` - Boolean flag indicating to use entity
clusters
* "kernel" - Driscoll-Kraay HAC estimator. Configurations options are:
* ``kernel`` - One of the supported kernels (bartlett, parzen, qs).
Default is Bartlett's kernel, which is produces a covariance
estimator similar to the Newey-West covariance estimator.
* ``bandwidth`` - Bandwidth to use when computing the kernel. If
not provided, a naive default is used.
When using a clustered covariance estimator, all cluster ids must be
identical within a first difference. In most scenarios, this requires
ids to be identical within an entity.
"""
y_fd = self.dependent.first_difference()
time_ids = y_fd.time_ids
entity_ids = y_fd.entity_ids
index = y_fd.index
y = cast(Float64Array, y_fd.values2d)
x = cast(Float64Array, self.exog.first_difference().values2d)
if np.all(self.weights.values2d == 1.0):
w = root_w = np.ones_like(y)
else:
w = cast(Float64Array, 1.0 / self.weights.values3d)
w = w[:, :-1] + w[:, 1:]
w = cast(Float64Array, 1.0 / w)
w_frame = panel_to_frame(
w,
self.weights.panel.items,
self.weights.panel.major_axis[1:],
self.weights.panel.minor_axis,
True,
)
w_frame = w_frame.reindex(self.weights.index).dropna(how="any")
index = cast(MultiIndex, w_frame.index)
w = np.require(w_frame, requirements="W")
w /= w.mean()
root_w = cast(Float64Array, np.sqrt(w))
wx = root_w * x
wy = root_w * y
params = _lstsq(wx, wy, rcond=None)[0]
df_resid = y.shape[0] - x.shape[1]
cov_config = self._setup_clusters(cov_config)
extra_df = 0
if "extra_df" in cov_config:
cov_config = cov_config.copy()
_extra_df = cov_config.pop("extra_df")
assert isinstance(_extra_df, (str, int))
extra_df = int(_extra_df)
cov = setup_covariance_estimator(
self._cov_estimators,
cov_type,
wy,
wx,
params,
entity_ids,
time_ids,
debiased=debiased,
extra_df=extra_df,
**cov_config,
)
weps = wy - wx @ params
fitted = DataFrame(
self.exog.values2d @ params, self.dependent.index, ["fitted_values"]
)
idiosyncratic = DataFrame(
self.dependent.values2d - fitted.values,
self.dependent.index,
["idiosyncratic"],
)
effects = DataFrame(
np.full_like(np.asarray(fitted), np.nan),
self.dependent.index,
["estimated_effects"],
)
eps = y - x @ params
residual_ss = float(np.squeeze(weps.T @ weps))
total_ss = float(np.squeeze(w.T @ (y**2)))
r2 = 1 - residual_ss / total_ss
res = self._postestimation(
params, cov, debiased, df_resid, weps, wy, wx, root_w
)
res.update(
dict(
df_resid=df_resid,
df_model=x.shape[1],
nobs=y.shape[0],
residual_ss=residual_ss,
total_ss=total_ss,
r2=r2,
resids=eps,
wresids=weps,
index=index,
fitted=fitted,
effects=effects,
idiosyncratic=idiosyncratic,
)
)
return PanelResults(res)
|
(self, *, cov_type: str = 'unadjusted', debiased: bool = True, **cov_config: bool | float | str | numpy.ndarray | pandas.core.frame.DataFrame | linearmodels.panel.data.PanelData) -> linearmodels.panel.results.PanelResults
|
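For intuition, when all weights are one the estimator above reduces to ordinary least squares on within-entity first differences, with R-squared computed against the uncentered total sum of squares since no constant is present. A minimal NumPy/pandas sketch of that core computation on synthetic data (all names are illustrative):

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
index = pd.MultiIndex.from_product(
    [[f"entity_{i}" for i in range(50)], range(6)], names=["entity", "time"]
)
x = pd.DataFrame({"x1": rng.standard_normal(len(index))}, index=index)
y = 0.5 * x["x1"] + rng.standard_normal(len(index))

# Within-entity first differences; the first period of each entity drops out.
dx = x.groupby(level="entity").diff().dropna()
dy = y.groupby(level="entity").diff().dropna()

params = np.linalg.lstsq(dx.to_numpy(), dy.to_numpy()[:, None], rcond=None)[0]
resid = dy.to_numpy()[:, None] - dx.to_numpy() @ params
rss = float((resid ** 2).sum())
tss = float((dy.to_numpy() ** 2).sum())
r2 = 1.0 - rss / tss
print(params.ravel(), r2)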
42,873 |
linearmodels.iv.model
|
IV2SLS
|
Estimation of IV models using two-stage least squares
Parameters
----------
dependent : array_like
Endogenous variables (nobs by 1)
exog : array_like
Exogenous regressors (nobs by nexog)
endog : array_like
Endogenous regressors (nobs by nendog)
instruments : array_like
Instrumental variables (nobs by ninstr)
weights : array_like
Observation weights used in estimation
Notes
-----
The 2SLS estimator is defined
.. math::
\hat{\beta}_{2SLS} & =(X'Z(Z'Z)^{-1}Z'X)^{-1}X'Z(Z'Z)^{-1}Z'Y\\
& =(\hat{X}'\hat{X})^{-1}\hat{X}'Y\\
\hat{X} & =Z(Z'Z)^{-1}Z'X
The 2SLS estimator is a special case of a k-class estimator with
:math:`\kappa=1`.
.. todo::
* VCV: bootstrap
See Also
--------
IVLIML, IVGMM, IVGMMCUE
|
class IV2SLS(_IVLSModelBase):
r"""
Estimation of IV models using two-stage least squares
Parameters
----------
dependent : array_like
Endogenous variables (nobs by 1)
exog : array_like
Exogenous regressors (nobs by nexog)
endog : array_like
Endogenous regressors (nobs by nendog)
instruments : array_like
Instrumental variables (nobs by ninstr)
weights : array_like
Observation weights used in estimation
Notes
-----
The 2SLS estimator is defined
.. math::
\hat{\beta}_{2SLS} & =(X'Z(Z'Z)^{-1}Z'X)^{-1}X'Z(Z'Z)^{-1}Z'Y\\
& =(\hat{X}'\hat{X})^{-1}\hat{X}'Y\\
\hat{X} & =Z(Z'Z)^{-1}Z'X
The 2SLS estimator is a special case of a k-class estimator with
    :math:`\kappa=1`.
.. todo::
* VCV: bootstrap
See Also
--------
IVLIML, IVGMM, IVGMMCUE
"""
def __init__(
self,
dependent: IVDataLike,
exog: IVDataLike | None,
endog: IVDataLike | None,
instruments: IVDataLike | None,
*,
weights: IVDataLike | None = None,
):
self._method = "IV-2SLS"
super().__init__(
dependent, exog, endog, instruments, weights=weights, fuller=0, kappa=1
)
@staticmethod
def from_formula(
formula: str, data: DataFrame, *, weights: IVDataLike | None = None
) -> IV2SLS:
"""
Parameters
----------
formula : str
Formula modified for the IV syntax described in the notes
section
data : DataFrame
DataFrame containing the variables used in the formula
weights : array_like
Observation weights used in estimation
Returns
-------
IV2SLS
Model instance
Notes
-----
The IV formula modifies the standard formula syntax to include a
block of the form [endog ~ instruments] which is used to indicate
the list of endogenous variables and instruments. The general
structure is `dependent ~ exog [endog ~ instruments]` and it must
be the case that the formula expressions constructed from blocks
`dependent ~ exog endog` and `dependent ~ exog instruments` are both
valid formulas.
A constant must be explicitly included using "1 +" if required.
Examples
--------
>>> import numpy as np
>>> from linearmodels.datasets import wage
>>> from linearmodels.iv import IV2SLS
>>> data = wage.load()
>>> formula = 'np.log(wage) ~ 1 + exper + exper ** 2 + brthord + [educ ~ sibs]'
>>> mod = IV2SLS.from_formula(formula, data)
"""
parser = IVFormulaParser(formula, data)
dep, exog, endog, instr = parser.data
mod = IV2SLS(dep, exog, endog, instr, weights=weights)
mod.formula = formula
return mod
|
(dependent: 'IVDataLike', exog: 'IVDataLike | None', endog: 'IVDataLike | None', instruments: 'IVDataLike | None', *, weights: 'IVDataLike | None' = None)
|
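The two expressions for the 2SLS estimator in the docstring above are algebraically identical, since the projection matrix Z(Z'Z)^{-1}Z' is symmetric and idempotent. A minimal NumPy sketch on simulated data (arbitrary dimensions) checking the equivalence numerically:

import numpy as np

rng = np.random.default_rng(0)
n, nvar, ninstr = 500, 2, 3
z = rng.standard_normal((n, ninstr))                 # instruments
x = z @ rng.standard_normal((ninstr, nvar)) + rng.standard_normal((n, nvar))
y = x @ np.array([[1.0], [-0.5]]) + rng.standard_normal((n, 1))

pz = z @ np.linalg.solve(z.T @ z, z.T)               # projection onto the instruments
beta_direct = np.linalg.solve(x.T @ pz @ x, x.T @ pz @ y)

x_hat = pz @ x                                       # first-stage fitted values
beta_two_step = np.linalg.solve(x_hat.T @ x_hat, x_hat.T @ y)

assert np.allclose(beta_direct, beta_two_step)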
42,874 |
linearmodels.iv.model
|
__init__
| null |
def __init__(
self,
dependent: IVDataLike,
exog: IVDataLike | None,
endog: IVDataLike | None,
instruments: IVDataLike | None,
*,
weights: IVDataLike | None = None,
):
self._method = "IV-2SLS"
super().__init__(
dependent, exog, endog, instruments, weights=weights, fuller=0, kappa=1
)
|
(self, dependent: Union[linearmodels.iv.data.IVData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series], exog: Union[linearmodels.iv.data.IVData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType], endog: Union[linearmodels.iv.data.IVData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType], instruments: Union[linearmodels.iv.data.IVData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType], *, weights: Union[linearmodels.iv.data.IVData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType] = None)
|
42,875 |
linearmodels.iv.model
|
_drop_missing
| null |
def _drop_missing(self) -> BoolArray:
data = (self.dependent, self.exog, self.endog, self.instruments, self.weights)
missing = cast(
BoolArray, npany(column_stack([dh.isnull for dh in data]), axis=1)
)
if npany(missing):
if npall(missing):
raise ValueError(
"All observations contain missing data. "
"Model cannot be estimated."
)
self.dependent.drop(missing)
self.exog.drop(missing)
self.endog.drop(missing)
self.instruments.drop(missing)
self.weights.drop(missing)
missing_warning(missing, stacklevel=4)
return missing
|
(self) -> numpy.ndarray
|
42,876 |
linearmodels.iv.model
|
_estimate_kappa
| null |
def _estimate_kappa(self) -> float:
y, x, z = self._wy, self._wx, self._wz
is_exog = self._regressor_is_exog
e = c_[y, x[:, ~is_exog]]
x1 = x[:, is_exog]
ez = e - z @ (pinv(z) @ e)
if x1.shape[1] == 0: # No exogenous regressors
ex1 = e
else:
ex1 = e - x1 @ (pinv(x1) @ e)
vpmzv_sqinv = inv_sqrth(ez.T @ ez)
q = vpmzv_sqinv @ (ex1.T @ ex1) @ vpmzv_sqinv
return min(eigvalsh(q))
|
(self) -> float
|
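The value computed above is the LIML kappa: the smallest eigenvalue of a ratio of residual cross-product matrices, where e stacks the dependent variable with the endogenous regressors, z is the full instrument set, and x1 holds only the exogenous regressors. Because z spans x1, this eigenvalue is never below one. A NumPy sketch on simulated, over-identified data (hypothetical names and dimensions) mirroring the computation:

import numpy as np
from numpy.linalg import eigh, eigvalsh, pinv

def inv_sqrt_symmetric(a):
    # Inverse symmetric square root of a positive-definite matrix.
    vals, vecs = eigh(a)
    return vecs @ np.diag(1.0 / np.sqrt(vals)) @ vecs.T

rng = np.random.default_rng(0)
n = 400
x1 = np.column_stack([np.ones(n), rng.standard_normal(n)])         # exogenous regressors
instr = rng.standard_normal((n, 2))                                 # excluded instruments
endog = instr @ np.array([[0.8], [0.4]]) + rng.standard_normal((n, 1))
y = x1 @ np.array([[1.0], [0.5]]) + 0.7 * endog + rng.standard_normal((n, 1))

z = np.column_stack([x1, instr])              # full instrument set [exog instruments]
e = np.column_stack([y, endog])               # [dependent, endogenous regressors]
ez = e - z @ (pinv(z) @ e)                    # residuals on the full instrument set
ex1 = e - x1 @ (pinv(x1) @ e)                 # residuals on the exogenous regressors only
s_inv_sqrt = inv_sqrt_symmetric(ez.T @ ez)
kappa = min(eigvalsh(s_inv_sqrt @ (ex1.T @ ex1) @ s_inv_sqrt))
print(kappa)                                  # >= 1, typically > 1 when over-identified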
42,877 |
linearmodels.iv.model
|
_f_statistic
| null |
def _f_statistic(
self, params: Float64Array, cov: Float64Array, debiased: bool
) -> WaldTestStatistic | InvalidTestStatistic:
const_loc = find_constant(self._x)
nobs, nvar = self._x.shape
return f_statistic(params, cov, debiased, nobs - nvar, const_loc)
|
(self, params: numpy.ndarray, cov: numpy.ndarray, debiased: bool) -> linearmodels.shared.hypotheses.WaldTestStatistic | linearmodels.shared.hypotheses.InvalidTestStatistic
|
42,878 |
linearmodels.iv.model
|
_post_estimation
| null |
def _post_estimation(
self, params: Float64Array, cov_estimator: CovarianceEstimator, cov_type: str
) -> dict[str, Any]:
columns = self._columns
index = self._index
eps = self.resids(params)
y = self.dependent.pandas
fitted = DataFrameWrapper(
asarray(y) - eps, index=y.index, columns=["fitted_values"]
)
weps = self.wresids(params)
cov = cov_estimator.cov
debiased = cov_estimator.debiased
residual_ss = squeeze(weps.T @ weps)
w = self.weights.ndarray
e = self._wy
if self.has_constant:
e = e - sqrt(self.weights.ndarray) * average(self._y, weights=w)
total_ss = float(squeeze(e.T @ e))
r2 = 1 - residual_ss / total_ss
fstat = self._f_statistic(params, cov, debiased)
out = {
"params": Series(params.squeeze(), columns, name="parameter"),
"eps": SeriesWrapper(eps.squeeze(), index=index, name="residual"),
"weps": SeriesWrapper(
weps.squeeze(), index=index, name="weighted residual"
),
"cov": DataFrame(cov, columns=columns, index=columns),
"s2": float(squeeze(cov_estimator.s2)),
"debiased": debiased,
"residual_ss": float(residual_ss),
"total_ss": float(total_ss),
"r2": float(squeeze(r2)),
"fstat": fstat,
"vars": columns,
"instruments": self._instr_columns,
"cov_config": cov_estimator.config,
"cov_type": cov_type,
"method": self._method,
"cov_estimator": cov_estimator,
"fitted": fitted,
"original_index": self._original_index,
}
return out
|
(self, params: numpy.ndarray, cov_estimator: ~CovarianceEstimator, cov_type: str) -> dict[str, typing.Any]
|
42,879 |
linearmodels.iv.model
|
_validate_inputs
| null |
def _validate_inputs(self) -> None:
x, z = self._x, self._z
if x.shape[1] == 0:
raise ValueError("Model must contain at least one regressor.")
if self.instruments.shape[1] < self.endog.shape[1]:
raise ValueError(
"The number of instruments ({}) must be at least "
"as large as the number of endogenous regressors"
" ({}).".format(self.instruments.shape[1], self.endog.shape[1])
)
if matrix_rank(x) < x.shape[1]:
raise ValueError("regressors [exog endog] do not have full " "column rank")
if matrix_rank(z) < z.shape[1]:
raise ValueError(
"instruments [exog instruments] do not have " "full column rank"
)
self._has_constant, self._const_loc = has_constant(x)
|
(self) -> NoneType
|
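Two conditions are enforced above: the order condition (at least as many instruments as endogenous regressors) and full column rank of both the regressor matrix [exog endog] and the instrument matrix [exog instruments]. A small NumPy sketch of the same checks with hypothetical shapes:

import numpy as np
from numpy.linalg import matrix_rank

rng = np.random.default_rng(0)
n = 100
exog = np.column_stack([np.ones(n), rng.standard_normal(n)])
instruments = rng.standard_normal((n, 2))
endog = rng.standard_normal((n, 1))

x = np.column_stack([exog, endog])            # regressors: [exog endog]
z = np.column_stack([exog, instruments])      # instruments: [exog instruments]

assert instruments.shape[1] >= endog.shape[1]     # order condition
assert matrix_rank(x) == x.shape[1]               # regressors have full column rank
assert matrix_rank(z) == z.shape[1]               # instruments have full column rank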
42,880 |
linearmodels.iv.model
|
estimate_parameters
|
Parameter estimation without error checking
Parameters
----------
x : ndarray
Regressor matrix (nobs by nvar)
y : ndarray
Regressand matrix (nobs by 1)
z : ndarray
Instrument matrix (nobs by ninstr)
kappa : scalar
Parameter value for k-class estimator
Returns
-------
ndarray
Estimated parameters (nvar by 1)
Notes
-----
Exposed as a static method to facilitate estimation with other data,
e.g., bootstrapped samples. Performs no error checking.
|
@staticmethod
def estimate_parameters(
x: Float64Array, y: Float64Array, z: Float64Array, kappa: Numeric
) -> Float64Array:
"""
Parameter estimation without error checking
Parameters
----------
x : ndarray
Regressor matrix (nobs by nvar)
y : ndarray
Regressand matrix (nobs by 1)
z : ndarray
Instrument matrix (nobs by ninstr)
kappa : scalar
Parameter value for k-class estimator
Returns
-------
ndarray
Estimated parameters (nvar by 1)
Notes
-----
Exposed as a static method to facilitate estimation with other data,
e.g., bootstrapped samples. Performs no error checking.
"""
pinvz = pinv(z)
p1 = (x.T @ x) * (1 - kappa) + kappa * ((x.T @ z) @ (pinvz @ x))
p2 = (x.T @ y) * (1 - kappa) + kappa * ((x.T @ z) @ (pinvz @ y))
return inv(p1) @ p2
|
(x: numpy.ndarray, y: numpy.ndarray, z: numpy.ndarray, kappa: Union[int, float]) -> numpy.ndarray
|
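As a sanity check on the k-class algebra above, kappa=0 collapses to OLS and kappa=1 to 2SLS. A minimal NumPy sketch on simulated data verifying both endpoints:

import numpy as np
from numpy.linalg import inv, lstsq, pinv

def k_class(x, y, z, kappa):
    # Same algebra as estimate_parameters above.
    pinvz = pinv(z)
    p1 = (x.T @ x) * (1 - kappa) + kappa * ((x.T @ z) @ (pinvz @ x))
    p2 = (x.T @ y) * (1 - kappa) + kappa * ((x.T @ z) @ (pinvz @ y))
    return inv(p1) @ p2

rng = np.random.default_rng(0)
n = 300
z = rng.standard_normal((n, 3))
x = z @ rng.standard_normal((3, 2)) + rng.standard_normal((n, 2))
y = x @ np.array([[0.5], [-1.0]]) + rng.standard_normal((n, 1))

ols = lstsq(x, y, rcond=None)[0]
pz = z @ pinv(z)
tsls = inv(x.T @ pz @ x) @ (x.T @ pz @ y)

assert np.allclose(k_class(x, y, z, 0.0), ols)
assert np.allclose(k_class(x, y, z, 1.0), tsls)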
42,881 |
linearmodels.iv.model
|
fit
|
Estimate model parameters
Parameters
----------
cov_type : str
Name of covariance estimator to use. Supported covariance
estimators are:
* "unadjusted", "homoskedastic" - Classic homoskedastic inference
* "robust", "heteroskedastic" - Heteroskedasticity robust inference
* "kernel" - Heteroskedasticity and autocorrelation robust
inference
* "cluster" - One-way cluster dependent inference.
Heteroskedasticity robust
debiased : bool
    Flag indicating whether to debias the covariance estimator using
a degree of freedom adjustment.
**cov_config
    Additional parameters to pass to the covariance estimator. The list
    of optional parameters differs according to ``cov_type``. See
    the documentation of the alternative covariance estimators for
    the complete list of available options.
Returns
-------
IVResults
Results container
Notes
-----
Additional covariance parameters depend on the specific covariance
estimator used. See the docstring of the specific covariance estimator
for a list of supported options. Defaults are used if no covariance
configuration is provided.
See also
--------
linearmodels.iv.covariance.HomoskedasticCovariance
linearmodels.iv.covariance.HeteroskedasticCovariance
linearmodels.iv.covariance.KernelCovariance
linearmodels.iv.covariance.ClusteredCovariance
|
def fit(
self, *, cov_type: str = "robust", debiased: bool = False, **cov_config: Any
) -> OLSResults | IVResults:
"""
Estimate model parameters
Parameters
----------
cov_type : str
Name of covariance estimator to use. Supported covariance
estimators are:
* "unadjusted", "homoskedastic" - Classic homoskedastic inference
* "robust", "heteroskedastic" - Heteroskedasticity robust inference
* "kernel" - Heteroskedasticity and autocorrelation robust
inference
* "cluster" - One-way cluster dependent inference.
Heteroskedasticity robust
debiased : bool
        Flag indicating whether to debias the covariance estimator using
a degree of freedom adjustment.
**cov_config
        Additional parameters to pass to the covariance estimator. The list
        of optional parameters differs according to ``cov_type``. See
        the documentation of the alternative covariance estimators for
        the complete list of available options.
Returns
-------
IVResults
Results container
Notes
-----
    Additional covariance parameters depend on the specific covariance
    estimator used. See the docstring of the specific covariance estimator
    for a list of supported options. Defaults are used if no covariance
    configuration is provided.
See also
--------
linearmodels.iv.covariance.HomoskedasticCovariance
linearmodels.iv.covariance.HeteroskedasticCovariance
linearmodels.iv.covariance.KernelCovariance
linearmodels.iv.covariance.ClusteredCovariance
"""
wy, wx, wz = self._wy, self._wx, self._wz
kappa = self._kappa
try:
liml_kappa: float = self._estimate_kappa()
except Exception as exc:
liml_kappa = nan
if kappa is None:
raise ValueError(
"Unable to estimate kappa. This is most likely occurs if the "
f"instrument matrix is rank deficient. The error raised when "
f"computing kappa was:\n\n{exc}"
)
if kappa is not None:
est_kappa = kappa
else:
est_kappa = liml_kappa
if self._fuller != 0:
nobs, ninstr = wz.shape
est_kappa -= self._fuller / (nobs - ninstr)
params = self.estimate_parameters(wx, wy, wz, est_kappa)
cov_estimator = COVARIANCE_ESTIMATORS[cov_type]
cov_config["debiased"] = debiased
cov_config["kappa"] = est_kappa
cov_config_copy = {k: v for k, v in cov_config.items()}
if "center" in cov_config_copy:
del cov_config_copy["center"]
cov_estimator_inst = cov_estimator(wx, wy, wz, params, **cov_config_copy)
results = {"kappa": est_kappa, "liml_kappa": liml_kappa}
pe = self._post_estimation(params, cov_estimator_inst, cov_type)
results.update(pe)
if self.endog.shape[1] == 0 and self.instruments.shape[1] == 0:
return OLSResults(results, self)
else:
return IVResults(results, self)
|
(self, *, cov_type: str = 'robust', debiased: bool = False, **cov_config: Any) -> linearmodels.iv.results.OLSResults | linearmodels.iv.results.IVResults
|
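A minimal end-to-end sketch of the workflow above on simulated data with one exogenous and one endogenous regressor; all variable and column names are illustrative:

import numpy as np
import pandas as pd
from linearmodels.iv import IV2SLS

rng = np.random.default_rng(0)
n = 1000
instrument = rng.standard_normal(n)
error = rng.standard_normal(n)
endog = 0.8 * instrument + 0.5 * error + rng.standard_normal(n)   # correlated with the error
exog = rng.standard_normal(n)
dependent = 1.0 + 0.5 * exog + 0.7 * endog + error

data = pd.DataFrame(
    {"y": dependent, "x": exog, "w": endog, "z": instrument, "const": 1.0}
)
mod = IV2SLS(data["y"], data[["const", "x"]], data[["w"]], data[["z"]])
res = mod.fit(cov_type="robust")     # heteroskedasticity-robust, the default
print(res.params)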
42,882 |
linearmodels.iv.model
|
from_formula
|
Parameters
----------
formula : str
Formula modified for the IV syntax described in the notes
section
data : DataFrame
DataFrame containing the variables used in the formula
weights : array_like
Observation weights used in estimation
Returns
-------
IV2SLS
Model instance
Notes
-----
The IV formula modifies the standard formula syntax to include a
block of the form [endog ~ instruments] which is used to indicate
the list of endogenous variables and instruments. The general
structure is `dependent ~ exog [endog ~ instruments]` and it must
be the case that the formula expressions constructed from blocks
`dependent ~ exog endog` and `dependent ~ exog instruments` are both
valid formulas.
A constant must be explicitly included using "1 +" if required.
Examples
--------
>>> import numpy as np
>>> from linearmodels.datasets import wage
>>> from linearmodels.iv import IV2SLS
>>> data = wage.load()
>>> formula = 'np.log(wage) ~ 1 + exper + exper ** 2 + brthord + [educ ~ sibs]'
>>> mod = IV2SLS.from_formula(formula, data)
|
@staticmethod
def from_formula(
formula: str, data: DataFrame, *, weights: IVDataLike | None = None
) -> IV2SLS:
"""
Parameters
----------
formula : str
Formula modified for the IV syntax described in the notes
section
data : DataFrame
DataFrame containing the variables used in the formula
weights : array_like
Observation weights used in estimation
Returns
-------
IV2SLS
Model instance
Notes
-----
The IV formula modifies the standard formula syntax to include a
block of the form [endog ~ instruments] which is used to indicate
the list of endogenous variables and instruments. The general
structure is `dependent ~ exog [endog ~ instruments]` and it must
be the case that the formula expressions constructed from blocks
`dependent ~ exog endog` and `dependent ~ exog instruments` are both
valid formulas.
A constant must be explicitly included using "1 +" if required.
Examples
--------
>>> import numpy as np
>>> from linearmodels.datasets import wage
>>> from linearmodels.iv import IV2SLS
>>> data = wage.load()
>>> formula = 'np.log(wage) ~ 1 + exper + exper ** 2 + brthord + [educ ~ sibs]'
>>> mod = IV2SLS.from_formula(formula, data)
"""
parser = IVFormulaParser(formula, data)
dep, exog, endog, instr = parser.data
mod = IV2SLS(dep, exog, endog, instr, weights=weights)
mod.formula = formula
return mod
|
(formula: str, data: pandas.core.frame.DataFrame, *, weights: Union[linearmodels.iv.data.IVData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType] = None) -> linearmodels.iv.model.IV2SLS
|
42,883 |
linearmodels.iv.model
|
predict
|
Predict values for additional data
Parameters
----------
params : array_like
Model parameters (nvar by 1)
exog : array_like
Exogenous regressors (nobs by nexog)
endog : array_like
Endogenous regressors (nobs by nendog)
data : DataFrame
Values to use when making predictions from a model constructed
from a formula
eval_env : int
    Depth to use when evaluating formulas.
Returns
-------
DataFrame
Fitted values from supplied data and parameters
Notes
-----
The number of parameters must satisfy nvar = nexog + nendog.
When using `exog` and `endog`, the regressor matrix is constructed as
`[exog, endog]` and so parameters must be aligned to this structure.
This is the same structure used in model estimation.
If `data` is not None, then `exog` and `endog` must be None.
Predictions from models constructed using formulas can
be computed using either `exog` and `endog`, which will treat these as
arrays of values corresponding to the formula-processed data, or using
`data` which will be processed using the formula used to construct the
values corresponding to the original model specification.
|
def predict(
self,
params: ArrayLike,
*,
exog: IVDataLike | None = None,
endog: IVDataLike | None = None,
data: DataFrame | None = None,
eval_env: int = 4,
) -> DataFrame:
"""
Predict values for additional data
Parameters
----------
params : array_like
Model parameters (nvar by 1)
exog : array_like
Exogenous regressors (nobs by nexog)
endog : array_like
Endogenous regressors (nobs by nendog)
data : DataFrame
Values to use when making predictions from a model constructed
from a formula
eval_env : int
        Depth to use when evaluating formulas.
Returns
-------
DataFrame
Fitted values from supplied data and parameters
Notes
-----
The number of parameters must satisfy nvar = nexog + nendog.
    When using `exog` and `endog`, the regressor matrix is constructed as
    `[exog, endog]` and so parameters must be aligned to this structure.
    This is the same structure used in model estimation.
    If `data` is not None, then `exog` and `endog` must be None.
    Predictions from models constructed using formulas can
    be computed using either `exog` and `endog`, which will treat these as
arrays of values corresponding to the formula-processed data, or using
`data` which will be processed using the formula used to construct the
values corresponding to the original model specification.
"""
if data is not None and not self.formula:
raise ValueError(
"Unable to use data when the model was not " "created using a formula."
)
if data is not None and (exog is not None or endog is not None):
raise ValueError(
"Predictions can only be constructed using one "
"of exog/endog or data, but not both."
)
if exog is not None or endog is not None:
exog = IVData(exog).pandas
endog = IVData(endog).pandas
elif data is not None:
parser = IVFormulaParser(self.formula, data, eval_env=eval_env)
exog = parser.exog
endog = parser.endog
else:
raise ValueError("exog and endog or data must be provided.")
assert exog is not None
assert endog is not None
if exog.shape[0] != endog.shape[0]:
raise ValueError("exog and endog must have the same number of rows.")
if (exog.index != endog.index).any():
warnings.warn(
"The indices of exog and endog do not match. Predictions created "
"using the index of exog.",
IndexWarning,
stacklevel=2,
)
exog_endog = concat([exog, endog], axis=1)
x = asarray(exog_endog)
params = atleast_2d(asarray(params))
if params.shape[0] == 1:
params = params.T
pred = DataFrame(x @ params, index=exog_endog.index, columns=["predictions"])
return pred
|
(self, params: Union[numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series], *, exog: Union[linearmodels.iv.data.IVData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType] = None, endog: Union[linearmodels.iv.data.IVData, numpy.ndarray, pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType] = None, data: Optional[pandas.core.frame.DataFrame] = None, eval_env: int = 4) -> pandas.core.frame.DataFrame
|
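A short usage sketch for predict (variable names illustrative): after fitting, pass the estimated parameters together with new exog and endog values ordered as [exog, endog], matching the estimation layout described in the Notes:

import numpy as np
import pandas as pd
from linearmodels.iv import IV2SLS

rng = np.random.default_rng(0)
n = 200
z = pd.DataFrame({"z": rng.standard_normal(n)})
w = pd.DataFrame({"w": 0.9 * z["z"] + rng.standard_normal(n)})
x = pd.DataFrame({"const": 1.0, "x": rng.standard_normal(n)})
y = pd.Series(1.0 + 0.5 * x["x"] + 0.7 * w["w"] + rng.standard_normal(n), name="y")

mod = IV2SLS(y, x, w, z)
res = mod.fit()

# Predictions for five new observations; column order must match [exog, endog].
print(mod.predict(res.params, exog=x.iloc[:5], endog=w.iloc[:5]))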
42,884 |
linearmodels.iv.model
|
resids
|
Compute model residuals
Parameters
----------
params : ndarray
Model parameters (nvar by 1)
Returns
-------
ndarray
Model residuals
|
def resids(self, params: Float64Array) -> Float64Array:
"""
Compute model residuals
Parameters
----------
params : ndarray
Model parameters (nvar by 1)
Returns
-------
ndarray
Model residuals
"""
return self._y - self._x @ params
|
(self, params: numpy.ndarray) -> numpy.ndarray
|
42,885 |
linearmodels.iv.model
|
wresids
|
Compute weighted model residuals
Parameters
----------
params : ndarray
Model parameters (nvar by 1)
Returns
-------
ndarray
Weighted model residuals
Notes
-----
Uses weighted versions of data instead of raw data. Identical to
resids if all weights are unity.
|
def wresids(self, params: Float64Array) -> Float64Array:
"""
Compute weighted model residuals
Parameters
----------
params : ndarray
Model parameters (nvar by 1)
Returns
-------
ndarray
Weighted model residuals
Notes
-----
Uses weighted versions of data instead of raw data. Identical to
resids if all weights are unity.
"""
return self._wy - self._wx @ params
|
(self, params: numpy.ndarray) -> numpy.ndarray
|